code stringlengths 1 2.01M | repo_name stringlengths 3 62 | path stringlengths 1 267 | language stringclasses 231
values | license stringclasses 13
values | size int64 1 2.01M |
|---|---|---|---|---|---|
<%@ include file="IncludeTop.jsp" %>
<%@ taglib prefix="html" uri="http://jakarta.apache.org/struts/tags-html" %>
<html:form action="/shop/newAccount.do" styleId="workingAccountForm" method="post" >
<html:hidden name="workingAccountForm" property="validate" value="newAccount"/>
<TABLE cellpadding=10 cellspacing=0 align=center border=1 bgcolor="#dddddd"><TR><TD>
<FONT color=darkgreen><H3>User Information</H3></FONT>
<TABLE bgcolor="#008800" border=0 cellpadding=3 cellspacing=1>
<TR bgcolor="#FFFF88"><TD>
User ID:</TD><TD><html:text name="workingAccountForm" property="account.username" />
</TD></TR><TR bgcolor="#FFFF88"><TD>
New password:</TD><TD><html:password name="workingAccountForm" property="account.password"/>
</TD></TR><TR bgcolor="#FFFF88"><TD>
Repeat password:</TD><TD> <html:password name="workingAccountForm" property="repeatedPassword"/>
</TD></TR>
</TABLE>
<%@ include file="IncludeAccountFields.jsp" %>
</TD></TR></TABLE>
<BR><CENTER>
<input border=0 type="image" src="../images/button_submit.gif" />
</CENTER>
</html:form>
<%@ include file="IncludeBottom.jsp" %> | zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/struts/NewAccountForm.jsp | Java Server Pages | art | 1,130 |
<center>
<a href="<c:url value="/shop/viewCategory.do?categoryId=FISH"/>">
<img border="0" src="../images/sm_fish.gif" /></a>
<img border="0" src="../images/separator.gif" />
<a href="<c:url value="/shop/viewCategory.do?categoryId=DOGS"/>">
<img border="0" src="../images/sm_dogs.gif" /></a>
<img border="0" src="../images/separator.gif" />
<a href="<c:url value="/shop/viewCategory.do?categoryId=REPTILES"/>">
<img border="0" src="../images/sm_reptiles.gif" /></a>
<img border="0" src="../images/separator.gif" />
<a href="<c:url value="/shop/viewCategory.do?categoryId=CATS"/>">
<img border="0" src="../images/sm_cats.gif" /></a>
<img border="0" src="../images/separator.gif" />
<a href="<c:url value="/shop/viewCategory.do?categoryId=BIRDS"/>">
<img border="0" src="../images/sm_birds.gif" /></a>
</center>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/struts/IncludeQuickHeader.jsp | Java Server Pages | art | 826 |
<c:if test="${!empty accountForm.myList}">
<p> </p>
<table align="right" bgcolor="#008800" border="0" cellspacing="2" cellpadding="3">
<tr bgcolor="#CCCCCC"><td>
<font size="4"><b>Pet Favorites</b></font>
<font size="2"><i><br />Shop for more of your <br />favorite pets here.</i></font>
</td></tr>
<tr bgcolor="#FFFF88">
<td>
<c:forEach var="product" items="${accountForm.myList.pageList}" >
<a href="<c:url value="/shop/viewProduct.do"><c:param name="productId" value="${product.productId}"/></c:url>">
<c:out value="${product.name}"/>
</a>
<br/>
<font size="2">(<c:out value="${product.productId}"/>)</font>
<br/>
</c:forEach>
</td>
</tr>
<tr>
<td>
<c:if test="${!accountForm.myList.firstPage}">
<a href="viewCart.do?page=previous"><font color="white"><B><< Prev</B></font></a>
</c:if>
<c:if test="${!accountForm.myList.lastPage}">
<a href="viewCart.do?page=next"><font color="white"><B>Next >></B></font></a>
</c:if>
</td>
</tr>
</table>
</c:if>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/struts/IncludeMyList.jsp | Java Server Pages | art | 1,068 |
<%@ include file="IncludeTop.jsp" %>
<table align="left" bgcolor="#008800" border="0" cellspacing="2" cellpadding="2">
<tr><td bgcolor="#FFFF88">
<a href="<c:url value="/shop/index.do"/>"><b><font color="BLACK" size="2"><< Main Menu</font></b></a>
</td></tr>
</table>
<table align="center" bgcolor="#008800" border="0" cellspacing="2" cellpadding="3">
<tr bgcolor="#CCCCCC"> <td> </td> <td><b>Product ID</b></td> <td><b>Name</b></td> </tr>
<c:forEach var="product" items="${productList.pageList}">
<tr bgcolor="#FFFF88">
<td><a href="<c:url value="/shop/viewProduct.do"><c:param name="productId" value="${product.productId}"/></c:url>">
<c:out value="${product.description}" escapeXml="false"/></a></td>
<td><b><a href="<c:url value="/shop/viewProduct.do"><c:param name="productId" value="${product.productId}"/></c:url>">
<font color="BLACK"><c:out value="${product.productId}"/></font>
</a></b></td>
<td><c:out value="${product.name}"/></td>
</tr>
</c:forEach>
<tr>
<td>
<c:if test="${!productList.firstPage}">
<a href="?page=previous"><font color="white"><B><< Prev</B></font></a>
</c:if>
<c:if test="${!productList.lastPage}">
<a href="?page=next"><font color="white"><B>Next >></B></font></a>
</c:if>
</td>
</tr>
</table>
<%@ include file="IncludeBottom.jsp" %>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/struts/SearchProducts.jsp | Java Server Pages | art | 1,375 |
<%@ include file="IncludeTop.jsp" %>
<%@ taglib prefix="html" uri="http://jakarta.apache.org/struts/tags-html" %>
<html:form action="/shop/newOrder.do" styleId="workingOrderForm" method="post" >
<TABLE bgcolor="#008800" border=0 cellpadding=3 cellspacing=1>
<TR bgcolor="#FFFF88"><TD colspan=2>
<FONT color=GREEN size=4><B>Shipping Address</B></FONT>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
First name:</TD><TD><html:text name="workingOrderForm" property="order.shipToFirstName" />
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Last name:</TD><TD><html:text name="workingOrderForm" property="order.shipToLastName" />
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Address 1:</TD><TD><html:text size="40" name="workingOrderForm" property="order.shipAddress1" />
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Address 2:</TD><TD><html:text size="40" name="workingOrderForm" property="order.shipAddress2" />
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
City: </TD><TD><html:text name="workingOrderForm" property="order.shipCity" />
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
State:</TD><TD><html:text size="4" name="workingOrderForm" property="order.shipState" />
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Zip:</TD><TD><html:text size="10" name="workingOrderForm" property="order.shipZip" />
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Country: </TD><TD><html:text size="15" name="workingOrderForm" property="order.shipCountry" />
</TD></TR>
</TABLE>
<P>
<input type="image" src="../images/button_submit.gif" />
</html:form>
<%@ include file="IncludeBottom.jsp" %> | zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/struts/ShippingForm.jsp | Java Server Pages | art | 1,574 |
<%@ include file="IncludeTop.jsp" %>
<center>
<font size="4"><b>My Orders</b></font>
</center>
<table align="center" bgcolor="#008800" border="0" cellspacing="2" cellpadding="3">
<tr bgcolor="#CCCCCC"> <td><b>Order ID</b></td> <td><b>Date</b></td> <td><b>Total Price</b></td> </tr>
<c:forEach var="order" items="${orderList}">
<tr bgcolor="#FFFF88">
<td><b><a href="<c:url value="/shop/viewOrder.do"><c:param name="orderId" value="${order.orderId}"/></c:url>">
<font color="BLACK"><c:out value="${order.orderId}"/></font>
</a></b></td>
<td><fmt:formatDate value="${order.orderDate}" pattern="yyyy/MM/dd hh:mm:ss"/></td>
<td><fmt:formatNumber value="${order.totalPrice}" pattern="$#,##0.00"/></td>
</tr>
</c:forEach>
</table>
<%@ include file="IncludeBottom.jsp" %>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/struts/ListOrders.jsp | Java Server Pages | art | 809 |
<%@ include file="IncludeTop.jsp" %>
<%@ taglib prefix="html" uri="http://jakarta.apache.org/struts/tags-html" %>
<html:form styleId="workingAccountForm" method="post" action="/shop/editAccount.do">
<html:hidden name="workingAccountForm" property="validate" value="editAccount" />
<html:hidden name="workingAccountForm" property="account.username" />
<table cellpadding="10" cellspacing="0" align="center" border="1" bgcolor="#dddddd"><tr><td>
<font color="darkgreen"><h3>User Information</h3></font>
<table border="0" cellpadding="3" cellspacing="1" bgcolor="#FFFF88">
<tr bgcolor="#FFFF88"><td>
User ID:</td><td><c:out value="${workingAccountForm.account.username}"/>
</td></tr><tr bgcolor="#FFFF88"><td>
New password:</td><td><html:password name="workingAccountForm" property="account.password" />
</td></tr><tr bgcolor="#FFFF88"><td>
Repeat password:</td><td> <html:password name="workingAccountForm" property="repeatedPassword" />
</td></tr>
</table>
<%@ include file="IncludeAccountFields.jsp" %>
</td></tr></table>
<br /><center>
<input border="0" type="image" src="../images/button_submit.gif" name="submit" value="Save Account Information" />
</center>
</html:form>
<p>
<center><b><a href="<c:url value="/shop/listOrders.do"/>">My Orders</a></b></center>
</p>
<%@ include file="IncludeBottom.jsp" %>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/struts/EditAccountForm.jsp | Java Server Pages | art | 1,349 |
<%@ include file="IncludeTop.jsp" %>
<H3>Error!</H3>
<B><c:out value="${message}" default="No further information was provided."/></B>
<%@ include file="IncludeBottom.jsp" %>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/struts/Error.jsp | Java Server Pages | art | 185 |
<%@ include file="IncludeTop.jsp" %>
<c:if test="${!empty message}">
<b><font color="RED"><c:out value="${message}"/></font></b>
</c:if>
<form action="<c:url value="/shop/signon.do"/>" method="POST">
<c:if test="${!empty signonForwardAction}">
<input type="hidden" name="forwardAction" value="<c:url value="${signonForwardAction}"/>"/>
</c:if>
<table align="center" border="0">
<tr>
<td colspan="2">Please enter your username and password.
<br /> </td>
</tr>
<tr>
<td>Username:</td>
<td><input type="text" name="username" value="j2ee" /></td>
</tr>
<tr>
<td>Password:</td>
<td><input type="password" name="password" value="j2ee" /></td>
</tr>
<tr>
<td> </td>
<td><input type="image" border="0" src="../images/button_submit.gif" name="update" /></td>
</tr>
</table>
</form>
<center>
<a href="<c:url value="/shop/newAccountForm.do"/>">
<img border="0" src="../images/button_register_now.gif" />
</a>
</center>
<%@ include file="IncludeBottom.jsp" %>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/struts/SignonForm.jsp | Java Server Pages | art | 1,009 |
<%@ include file="IncludeTop.jsp" %>
<table align="left" bgcolor="#008800" border="0" cellspacing="2" cellpadding="2">
<tr><td bgcolor="#FFFF88">
<a href="<c:url value="/shop/viewCategory.do"><c:param name="categoryId" value="${product.categoryId}"/></c:url>">
<b><font color="BLACK" size="2"><< <c:out value="${product.name}"/></font></b>
</a>
</td></tr>
</table>
<p>
<center>
<b><font size="4"><c:out value="${product.name}"/></font></b>
</center>
<table align="center" bgcolor="#008800" border="0" cellspacing="2" cellpadding="3">
<tr bgcolor="#CCCCCC"> <td><b>Item ID</b></td> <td><b>Product ID</b></td> <td><b>Description</b></td> <td><b>List Price</b></td> <td> </td> </tr>
<c:forEach var="item" items="${itemList.pageList}">
<tr bgcolor="#FFFF88">
<td><b>
<a href="<c:url value="/shop/viewItem.do"><c:param name="itemId" value="${item.itemId}"/></c:url>">
<c:out value="${item.itemId}"/>
</a></b></td>
<td><c:out value="${item.productId}"/></td>
<td>
<c:out value="${item.attribute1}"/>
<c:out value="${item.attribute2}"/>
<c:out value="${item.attribute3}"/>
<c:out value="${item.attribute4}"/>
<c:out value="${item.attribute5}"/>
<c:out value="${product.name}"/>
</td>
<td><fmt:formatNumber value="${item.listPrice}" pattern="$#,##0.00"/></td>
<td><a href="<c:url value="/shop/addItemToCart.do"><c:param name="workingItemId" value="${item.itemId}"/></c:url>">
<img border="0" src="../images/button_add_to_cart.gif"/>
</a></td>
</tr>
</c:forEach>
<tr><td>
<c:if test="${!itemList.firstPage}">
<a href="?page=previous"><font color="white"><B><< Prev</B></font></a>
</c:if>
<c:if test="${!itemList.lastPage}">
<a href="?page=next"><font color="white"><B>Next >></B></font></a>
</c:if>
</td></tr>
</table>
</p><%@ include file="IncludeBottom.jsp" %> | zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/struts/Product.jsp | Java Server Pages | art | 1,922 |
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/struts/Product.jsp | Java Server Pages | art | 1,922 |
<br>
<c:if test="${!empty accountForm.account.username}">
<c:if test="${accountForm.account.bannerOption}">
<table align="center" background="../images/bkg-topbar.gif" cellpadding="5" width="100%">
<tr><td>
<center>
<c:out value="${accountForm.account.bannerName}" escapeXml="false"/>
</center>
</td></tr>
</table>
</c:if>
</c:if>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/struts/IncludeBanner.jsp | Java Server Pages | art | 373 |
<%@ include file="IncludeTop.jsp" %>
<table align="left" bgcolor="#008800" border="0" cellspacing="2" cellpadding="2">
<tr><td bgcolor="#FFFF88">
<a href="<c:url value="/shop/viewProduct.do"><c:param name="productId" value="${product.productId}"/></c:url>">
<b><font color="BLACK" size="2"><< <c:out value="${product.name}"/></font></b>
</a>
</td></tr>
</table>
<p>
<table align="center" bgcolor="#008800" cellspacing="2" cellpadding="3" border="0" width="60%">
<tr bgcolor="#FFFF88">
<td bgcolor="#FFFFFF">
<c:out value="${product.description}" escapeXml="false"/>
</td>
</tr>
<tr bgcolor="#FFFF88">
<td width="100%" bgcolor="#cccccc">
<b><c:out value="${item.itemId}"/></b>
</td>
</tr><tr bgcolor="#FFFF88">
<td>
<b><font size="4">
<c:out value="${item.attribute1}"/>
<c:out value="${item.attribute2}"/>
<c:out value="${item.attribute3}"/>
<c:out value="${item.attribute4}"/>
<c:out value="${item.attribute5}"/>
<c:out value="${product.name}"/>
</font></b>
</td></tr>
<tr bgcolor="#FFFF88"><td>
<font size="3"><i><c:out value="${product.name}"/></i></font>
</td></tr>
<tr bgcolor="#FFFF88"><td>
<c:if test="${item.quantity <= 0}">
<font color="RED" size="2"><i>Back ordered.</i></font>
</c:if>
<c:if test="${item.quantity > 0}">
<font size="2"><fmt:formatNumber value="${item.quantity}"/> in stock.</font>
</c:if>
</td></tr>
<tr bgcolor="#FFFF88"><td>
<fmt:formatNumber value="${item.listPrice}" pattern="$#,##0.00" />
</td></tr>
<tr bgcolor="#FFFF88"><td>
<a href="<c:url value="/shop/addItemToCart.do"><c:param name="workingItemId" value="${item.itemId}"/></c:url>">
<img border="0" src="../images/button_add_to_cart.gif"/>
</a>
</td></tr>
</table>
<%@ include file="IncludeBottom.jsp" %>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/struts/Item.jsp | Java Server Pages | art | 1,814 |
<%@ include file="IncludeTop.jsp" %>
<table align="left" bgcolor="#008800" border="0" cellspacing="2" cellpadding="2">
<tr><td bgcolor="#FFFF88">
<a href="<c:url value="/shop/index.do"/>"><b><font color="BLACK" size="2"><< Main Menu</font></b></a>
</td></tr>
</table>
<p>
<center>
<b>Please confirm the information below and then press continue...</b>
</center>
<p>
<table width="60%" align="center" border="0" cellpadding="3" cellspacing="1" bgcolor="#FFFF88">
<tr bgcolor="#FFFF88"><td align="center" colspan="2">
<font size="4"><b>Order</b></font>
<br /><font size="3"><b><fmt:formatDate value="${workingOrderForm.order.orderDate}" pattern="yyyy/MM/dd hh:mm:ss" /></b></font>
</td></tr>
<tr bgcolor="#FFFF88"><td colspan="2">
<font color="GREEN" size="4"><b>Billing Address</b></font>
</td></tr>
<tr bgcolor="#FFFF88"><td>
First name:</td><td><c:out value="${workingOrderForm.order.billToFirstName}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Last name:</td><td><c:out value="${workingOrderForm.order.billToLastName}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Address 1:</td><td><c:out value="${workingOrderForm.order.billAddress1}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Address 2:</td><td><c:out value="${workingOrderForm.order.billAddress2}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
City: </td><td><c:out value="${workingOrderForm.order.billCity}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
State:</td><td><c:out value="${workingOrderForm.order.billState}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Zip:</td><td><c:out value="${workingOrderForm.order.billZip}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Country: </td><td><c:out value="${workingOrderForm.order.billCountry}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td colspan="2">
<font color="GREEN" size="4"><b>Shipping Address</b></font>
</td></tr><tr bgcolor="#FFFF88"><td>
First name:</td><td><c:out value="${workingOrderForm.order.shipToFirstName}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Last name:</td><td><c:out value="${workingOrderForm.order.shipToLastName}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Address 1:</td><td><c:out value="${workingOrderForm.order.shipAddress1}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Address 2:</td><td><c:out value="${workingOrderForm.order.shipAddress2}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
City: </td><td><c:out value="${workingOrderForm.order.shipCity}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
State:</td><td><c:out value="${workingOrderForm.order.shipState}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Zip:</td><td><c:out value="${workingOrderForm.order.shipZip}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Country: </td><td><c:out value="${workingOrderForm.order.shipCountry}"/>
</td></tr>
</table>
<p>
<center><a href="<c:url value="/shop/newOrder.do?confirmed=true"/>"><img border="0" src="../images/button_continue.gif" /></a></center>
<%@ include file="IncludeBottom.jsp" %>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/struts/ConfirmOrder.jsp | Java Server Pages | art | 2,954 |
<FONT color=darkgreen><H3>Account Information</H3></FONT>
<TABLE bgcolor="#008800" border=0 cellpadding=3 cellspacing=1>
<TR bgcolor="#FFFF88"><TD>
First name:</TD><TD><html:text name="workingAccountForm" property="account.firstName" />
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Last name:</TD><TD><html:text name="workingAccountForm" property="account.lastName" />
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Email:</TD><TD><html:text size="40" name="workingAccountForm" property="account.email" />
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Phone:</TD><TD><html:text name="workingAccountForm" property="account.phone" />
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Address 1:</TD><TD><html:text size="40" name="workingAccountForm" property="account.address1" />
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Address 2:</TD><TD><html:text size="40" name="workingAccountForm" property="account.address2" />
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
City: </TD><TD><html:text name="workingAccountForm" property="account.city" />
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
State:</TD><TD><html:text size="4" name="workingAccountForm" property="account.state" />
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Zip:</TD><TD><html:text size="10" name="workingAccountForm" property="account.zip" />
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Country: </TD><TD><html:text size="15" name="workingAccountForm" property="account.country" />
</TD></TR>
</TABLE>
<FONT color=darkgreen><H3>Profile Information</H3></FONT>
<TABLE bgcolor="#008800" border=0 cellpadding=3 cellspacing=1 >
<TR bgcolor="#FFFF88"><TD>
Language Preference:</TD><TD>
<html:select name="workingAccountForm" property="account.languagePreference">
<html:options name="workingAccountForm" property="languages" />
</html:select>
</TD></TR><TR bgcolor="#FFFF88"><TD>
Favourite Category:</TD><TD>
<html:select name="workingAccountForm" property="account.favouriteCategoryId">
<html:options name="workingAccountForm" property="categories" />
</html:select>
</TD></TR><TR bgcolor="#FFFF88"><TD colspan=2>
<html:checkbox name="workingAccountForm" property="account.listOption"/> Enable MyList
</TD></TR><TR bgcolor="#FFFF88"><TD colspan=2>
<html:checkbox name="workingAccountForm" property="account.bannerOption"/> Enable MyBanner
</TD></TR>
</TABLE>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/struts/IncludeAccountFields.jsp | Java Server Pages | art | 2,332 |
<%@ include file="IncludeTop.jsp" %>
<table border="0" width="100%" cellspacing="0" cellpadding="0">
<tr><td valign="top" width="20%" align="left">
<table align="left" bgcolor="#008800" border="0" cellspacing="2" cellpadding="2">
<tr><td bgcolor="#FFFF88">
<a href="<c:url value="/shop/index.do"/>"><b><font color="BLACK" size="2"><< Main Menu</font></b></a>
</td></tr>
</table>
</td><td valign="top" align="center">
<h2 align="center">Shopping Cart</h2>
<form action="<c:url value="/shop/updateCartQuantities.do"/>" method="post">
<table align="center" bgcolor="#008800" border="0" cellspacing="2" cellpadding="5">
<tr bgcolor="#cccccc">
<td><b>Item ID</b></td> <td><b>Product ID</b></td> <td><b>Description</b></td> <td><b>In Stock?</b></td> <td><b>Quantity</b></td> <td><b>List Price</b></td> <td><b>Total Cost</b></td> <td> </td>
</tr>
<c:if test="${cart.numberOfItems == 0}">
<tr bgcolor="#FFFF88"><td colspan="8"><b>Your cart is empty.</b></td></tr>
</c:if>
<c:forEach var="cartItem" items="${cart.cartItemList.pageList}">
<tr bgcolor="#FFFF88">
<td><b>
<a href="<c:url value="/shop/viewItem.do"><c:param name="itemId" value="${cartItem.item.itemId}"/></c:url>">
<c:out value="${cartItem.item.itemId}"/>
</a></b></td>
<td><c:out value="${cartItem.item.productId}"/></td>
<td>
<c:out value="${cartItem.item.attribute1}"/>
<c:out value="${cartItem.item.attribute2}"/>
<c:out value="${cartItem.item.attribute3}"/>
<c:out value="${cartItem.item.attribute4}"/>
<c:out value="${cartItem.item.attribute5}"/>
<c:out value="${cartItem.item.product.name}"/>
</td>
<td align="center"><c:out value="${cartItem.inStock}"/></td>
<td align="center">
<input type="text" size="3" name="<c:out value="${cartItem.item.itemId}"/>" value="<c:out value="${cartItem.quantity}"/>" />
</td>
<td align="right"><fmt:formatNumber value="${cartItem.item.listPrice}" pattern="$#,##0.00" /></td>
<td align="right"><fmt:formatNumber value="${cartItem.totalPrice}" pattern="$#,##0.00" /></td>
<td><a href="<c:url value="/shop/removeItemFromCart.do"><c:param name="workingItemId" value="${cartItem.item.itemId}"/></c:url>">
<img border="0" src="../images/button_remove.gif" />
</a></td>
</tr>
</c:forEach>
<tr bgcolor="#FFFF88">
<td colspan="7" align="right">
<b>Sub Total: <fmt:formatNumber value="${cart.subTotal}" pattern="$#,##0.00" /></b><br/>
<input type="image" border="0" src="../images/button_update_cart.gif" name="update" />
</td>
<td> </td>
</tr>
</table>
<center>
<c:if test="${!cart.cartItemList.firstPage}">
<a href="<c:url value="viewCart.do?page=previousCart"/>"><font color="green"><B><< Prev</B></font></a>
</c:if>
<c:if test="${!cart.cartItemList.lastPage}">
<a href="<c:url value="viewCart.do?page=nextCart"/>"><font color="green"><B>Next >></B></font></a>
</c:if>
</center>
</form>
<c:if test="${cart.numberOfItems > 0}">
<br /><center><a href="<c:url value="/shop/checkout.do"/>"><img border="0" src="../images/button_checkout.gif" /></a></center>
</c:if>
</td>
<td valign="top" width="20%" align="right">
<c:if test="${!empty userSession.account.username}">
<c:if test="${userSession.account.listOption}">
<%@ include file="IncludeMyList.jsp" %>
</c:if>
</c:if>
</td>
</tr>
</table>
<%@ include file="IncludeBanner.jsp" %>
<%@ include file="IncludeBottom.jsp" %>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/spring/Cart.jsp | Java Server Pages | art | 3,480 |
<%@ include file="IncludeTop.jsp" %>
<table border="0" cellspacing="0" width="100%">
<tbody>
<tr>
<td valign="top" width="100%">
<table align="left" border="0" cellspacing="0" width="80%">
<tbody>
<tr>
<td valign="top">
<!-- SIDEBAR -->
<table bgcolor="#FFFF88" border="0" cellspacing="0" cellpadding="5" width="200">
<tbody>
<tr>
<td>
<c:if test="${!empty userSession.account}">
<b><i><font size="2" color="BLACK">Welcome <c:out value="${userSession.account.firstName}"/>!</font></i></b>
</c:if>
</td>
</tr>
<tr>
<td>
<a href="<c:url value="/shop/viewCategory.do?categoryId=FISH"/>">
<img border="0" src="../images/fish_icon.gif" /></a>
</td>
</tr>
<tr>
<td>
<a href="<c:url value="/shop/viewCategory.do?categoryId=DOGS"/>">
<img border="0" src="../images/dogs_icon.gif" /></a>
</td>
</tr>
<tr>
<td>
<a href="<c:url value="/shop/viewCategory.do?categoryId=CATS"/>">
<img border="0" src="../images/cats_icon.gif" /></a>
</td>
</tr>
<tr>
<td>
<a href="<c:url value="/shop/viewCategory.do?categoryId=REPTILES"/>">
<img border="0" src="../images/reptiles_icon.gif" /></a>
</td>
</tr>
<tr>
<td>
<a href="<c:url value="/shop/viewCategory.do?categoryId=BIRDS"/>">
<img border="0" src="../images/birds_icon.gif" /></a>
</td>
</tr>
</tbody>
</table>
</td>
<td align="center" bgcolor="white" height="300" width="100%">
<!-- MAIN IMAGE -->
<map name="estoremap"><area alt="Birds" coords="72,2,280,250" href="viewCategory.do?categoryId=BIRDS" shape="RECT" />
<area alt="Fish" coords="2,180,72,250" href="viewCategory.do?categoryId=FISH" shape="RECT" />
<area alt="Dogs" coords="60,250,130,320" href="viewCategory.do?categoryId=DOGS" shape="RECT" />
<area alt="Reptiles" coords="140,270,210,340" href="viewCategory.do?categoryId=REPTILES" shape="RECT" />
<area alt="Cats" coords="225,240,295,310" href="viewCategory.do?categoryId=CATS" shape="RECT" />
<area alt="Birds" coords="280,180,350,250" href="viewCategory.do?categoryId=BIRDS" shape="RECT" /></map>
<img border="0" height="355" src="../images/splash.gif" align="center" usemap="#estoremap" width="350" />
</td></tr></tbody></table></td></tr>
</tbody>
</table>
<%@ include file="IncludeBanner.jsp" %>
<%@ include file="IncludeBottom.jsp" %>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/spring/index.jsp | Java Server Pages | art | 3,027 |
<%@ include file="IncludeTop.jsp" %>
<table border="0" width="100%" cellspacing="0" cellpadding="0">
<tr><td valign="top" width="20%" align="left">
<table align="left" bgcolor="#008800" border="0" cellspacing="2" cellpadding="2">
<tr><td bgcolor="#FFFF88">
<a href="<c:url value="/shop/viewCart.do"/>"><b><font color="BLACK" size="2"><< Shopping Cart</font></b></a>
</td></tr>
</table>
</td>
<td valign="top" align="center">
<h2 align="center">Checkout Summary</h2>
<table align="center" bgcolor="#008800" border="0" cellspacing="2" cellpadding="5">
<tr bgcolor="#cccccc">
<td><b>Item ID</b></td> <td><b>Product ID</b></td> <td><b>Description</b></td> <td><b>In Stock?</b></td> <td><b>Quantity</b></td> <td><b>List Price</b></td> <td><b>Total Cost</b></td>
</tr>
<c:forEach var="cartItem" items="${cart.cartItemList.pageList}">
<tr bgcolor="#FFFF88">
<td><b>
<a href="<c:url value="/shop/viewItem.do"><c:param name="itemId" value="${cartItem.item.itemId}"/></c:url>">
<c:out value="${cartItem.item.itemId}"/>
</a></b></td>
<td><c:out value="${cartItem.item.productId}"/></td>
<td>
<c:out value="${cartItem.item.attribute1}"/>
<c:out value="${cartItem.item.attribute2}"/>
<c:out value="${cartItem.item.attribute3}"/>
<c:out value="${cartItem.item.attribute4}"/>
<c:out value="${cartItem.item.attribute5}"/>
<c:out value="${cartItem.item.product.name}"/>
</td>
<td align="center"><c:out value="${cartItem.inStock}"/></td>
<td align="center">
<c:out value="${cartItem.quantity}"/>
</td>
<td align="right"><fmt:formatNumber value="${cartItem.item.listPrice}" pattern="$#,##0.00" /></td>
<td align="right"><fmt:formatNumber value="${cartItem.totalPrice}" pattern="$#,##0.00" /></td>
</tr>
</c:forEach>
<tr bgcolor="#FFFF88">
<td colspan="7" align="right">
<b>Sub Total: <fmt:formatNumber value="${cart.subTotal}" pattern="$#,##0.00" /></b><br />
</td>
</tr>
</table>
<center>
<c:if test="${!cart.cartItemList.firstPage}">
<a href="checkout.do?page=previousCart"><font color="green"><B><< Prev</B></font></a>
</c:if>
<c:if test="${!cart.cartItemList.lastPage}">
<a href="checkout.do?page=nextCart"><font color="green"><B>Next >></B></font></a>
</c:if>
</center>
<br>
<center><a href="<c:url value="/shop/newOrder.do"/>"><img border="0" src="../images/button_continue.gif" /></a></center>
</td>
<td valign="top" width="20%" align="right"> </td>
</tr>
</table>
<%@ include file="IncludeBottom.jsp" %>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/spring/Checkout.jsp | Java Server Pages | art | 2,574 |
<p> </p>
<table align="center">
<tr>
<td>
<a href="http://www.springframework.org">
<img border="0" align="center" src="../images/poweredBySpring.gif" alt="Powered by the Spring Framework"/>
</a>
</td>
<td>
<a href="http://www.ibatis.com">
<img border="0" align="center" src="../images/poweredby.gif" alt="Powered by iBATIS"/>
</a>
</td>
</tr>
</table>
<p align="center">
(Currently running on the Spring web tier)
</p>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/spring/IncludeBottom.jsp | Java Server Pages | art | 480 |
<%@ include file="IncludeTop.jsp" %>
<table align="left" bgcolor="#008800" border="0" cellspacing="2" cellpadding="2">
<tr><td bgcolor="#FFFF88">
<a href="<c:url value="/shop/index.do"/>"><b><font color="BLACK" size="2"><< Main Menu</font></b></a>
</td></tr>
<tr><td bgcolor="#FFFF88">
<%--
<html:link paramId="orderId" paramName="order" paramProperty="orderId" page="/shop/viewOrder.do?webservice=true"><b><font color="BLACK" size="2">Use Web Service</font></b></c:url>
--%>
</td></tr>
</table>
<c:if test="${!empty message}">
<center><b><c:out value="${message}"/></b></center>
</c:if>
<p>
<table width="60%" align="center" border="0" cellpadding="3" cellspacing="1" bgcolor="#FFFF88">
<tr bgcolor="#FFFF88"><td align="center" colspan="2">
<font size="4"><b>Order #<c:out value="${order.orderId}"/></b></font>
<br /><font size="3"><b><fmt:formatDate value="${order.orderDate}" pattern="yyyy/MM/dd hh:mm:ss" /></b></font>
</td></tr>
<tr bgcolor="#FFFF88"><td colspan="2">
<font color="GREEN" size="4"><b>Payment Details</b></font>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Card Type:</td><td>
<c:out value="${order.cardType}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Card Number:</td><td><c:out value="${order.creditCard}"/> <font color="red" size="2">* Fake number!</font>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Expiry Date (MM/YYYY):</td><td><c:out value="${order.expiryDate}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td colspan="2">
<font color="GREEN" size="4"><b>Billing Address</b></font>
</td></tr>
<tr bgcolor="#FFFF88"><td>
First name:</td><td><c:out value="${order.billToFirstName}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Last name:</td><td><c:out value="${order.billToLastName}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Address 1:</td><td><c:out value="${order.billAddress1}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Address 2:</td><td><c:out value="${order.billAddress2}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
City: </td><td><c:out value="${order.billCity}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
State:</td><td><c:out value="${order.billState}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Zip:</td><td><c:out value="${order.billZip}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Country: </td><td><c:out value="${order.billCountry}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td colspan="2">
<font color="GREEN" size="4"><b>Shipping Address</b></font>
</td></tr><tr bgcolor="#FFFF88"><td>
First name:</td><td><c:out value="${order.shipToFirstName}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Last name:</td><td><c:out value="${order.shipToLastName}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Address 1:</td><td><c:out value="${order.shipAddress1}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Address 2:</td><td><c:out value="${order.shipAddress2}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
City: </td><td><c:out value="${order.shipCity}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
State:</td><td><c:out value="${order.shipState}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Zip:</td><td><c:out value="${order.shipZip}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Country: </td><td><c:out value="${order.shipCountry}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Courier: </td><td><c:out value="${order.courier}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td colspan="2">
<b><font color="GREEN" size="4">Status:</font> <c:out value="${order.status}"/></b>
</td></tr>
<tr bgcolor="#FFFF88"><td colspan="2">
<table width="100%" align="center" bgcolor="#008800" border="0" cellspacing="2" cellpadding="3">
<tr bgcolor="#CCCCCC">
<td><b>Item ID</b></td>
<td><b>Description</b></td>
<td><b>Quantity</b></td>
<td><b>Price</b></td>
<td><b>Total Cost</b></td>
</tr>
<c:forEach var="lineItem" items="${order.lineItems}">
<tr bgcolor="#FFFF88">
<td><b><a href="<c:url value="/shop/viewItem.do"><c:param name="itemId" value="${lineItem.itemId}"/></c:url>">
<font color="BLACK"><c:out value="${lineItem.itemId}"/></font>
</a></b></td>
<td>
<c:out value="${lineItem.item.attribute1}"/>
<c:out value="${lineItem.item.attribute2}"/>
<c:out value="${lineItem.item.attribute3}"/>
<c:out value="${lineItem.item.attribute4}"/>
<c:out value="${lineItem.item.attribute5}"/>
<c:out value="${lineItem.item.product.name}"/>
</td>
<td><c:out value="${lineItem.quantity}"/></td>
<td align="right"><fmt:formatNumber value="${lineItem.unitPrice}" pattern="$#,##0.00"/></td>
<td align="right"><fmt:formatNumber value="${lineItem.totalPrice}" pattern="$#,##0.00"/></td>
</tr>
</c:forEach>
<tr bgcolor="#FFFF88">
<td colspan="5" align="right"><b>Total: <fmt:formatNumber value="${order.totalPrice}" pattern="$#,##0.00"/></b></td>
</tr>
</table>
</td></tr>
</table>
<%@ include file="IncludeBottom.jsp" %>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/spring/ViewOrder.jsp | Java Server Pages | art | 4,846 |
<%@ page contentType="text/html" %>
<%@ taglib prefix="c" uri="http://java.sun.com/jsp/jstl/core" %>
<%@ taglib prefix="fmt" uri="http://java.sun.com/jsp/jstl/fmt" %>
<html><head><title>JPetStore Demo</title>
<meta content="text/html; charset=windows-1252" http-equiv="Content-Type" />
<META HTTP-EQUIV="Cache-Control" CONTENT="max-age=0">
<META HTTP-EQUIV="Cache-Control" CONTENT="no-cache">
<meta http-equiv="expires" content="0">
<META HTTP-EQUIV="Expires" CONTENT="Tue, 01 Jan 1980 01:00:00 GMT">
<META HTTP-EQUIV="Pragma" CONTENT="no-cache">
</head>
<body bgcolor="white">
<table background="../images/bkg-topbar.gif" border="0" cellspacing="0" cellpadding="5" width="100%">
<tbody>
<tr>
<td><a href="<c:url value="/shop/index.do"/>"><img border="0" src="../images/logo-topbar.gif" /></a></td>
<td align="right"><a href="<c:url value="/shop/viewCart.do"/>"><img border="0" name="img_cart" src="../images/cart.gif" /></a>
<img border="0" src="../images/separator.gif" />
<c:if test="${empty userSession.account}" >
<a href="<c:url value="/shop/signonForm.do"/>"><img border="0" name="img_signin" src="../images/sign-in.gif" /></a>
</c:if>
<c:if test="${!empty userSession.account}" >
<a href="<c:url value="/shop/signoff.do"/>"><img border="0" name="img_signout" src="../images/sign-out.gif" /></a>
<img border="0" src="../images/separator.gif" />
<a href="<c:url value="/shop/editAccount.do"/>"><img border="0" name="img_myaccount" src="../images/my_account.gif" /></a>
</c:if>
<img border="0" src="../images/separator.gif" /><a href="../help.html"><img border="0" name="img_help" src="../images/help.gif" /></a>
</td>
<td align="left" valign="bottom">
<form action="<c:url value="/shop/searchProducts.do"/>" method="post">
<input type="hidden" name="search" value="true"/>
<input name="keyword" size="14" /> <input border="0" src="../images/search.gif" type="image"/>
</form>
</td>
</tr>
</tbody>
</table>
<%@ include file="IncludeQuickHeader.jsp" %>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/spring/IncludeTop.jsp | Java Server Pages | art | 2,108 |
<%@ include file="IncludeTop.jsp" %>
<table align="left" bgcolor="#008800" border="0" cellspacing="2" cellpadding="2">
<tr><td bgcolor="#FFFF88">
<a href="<c:url value="/shop/index.do"/>"><b><font color="BLACK" size="2"><< Main Menu</font></b></a>
</td></tr>
</table>
<p>
<center>
<h2><c:out value="${category.name}"/></h2>
</center>
<table align="center" bgcolor="#008800" border="0" cellspacing="2" cellpadding="3">
<tr bgcolor="#CCCCCC"> <td><b>Product ID</b></td> <td><b>Name</b></td> </tr>
<c:forEach var="product" items="${productList.pageList}">
<tr bgcolor="#FFFF88">
<td><b><a href="<c:url value="/shop/viewProduct.do"><c:param name="productId" value="${product.productId}"/></c:url>">
<font color="BLACK"><c:out value="${product.productId}"/></font>
</a></b></td>
<td><c:out value="${product.name}"/></td>
</tr>
</c:forEach>
<tr><td>
<c:if test="${!productList.firstPage}">
<a href="?page=previous"><font color="white"><B><< Prev</B></font></a>
</c:if>
<c:if test="${!productList.lastPage}">
<a href="?page=next"><font color="white"><B>Next >></B></font></a>
</c:if>
</td></tr>
</table>
<%@ include file="IncludeBottom.jsp" %></p>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/spring/Category.jsp | Java Server Pages | art | 1,241 |
<%@ include file="IncludeTop.jsp" %>
<%@ taglib prefix="spring" uri="http://www.springframework.org/tags" %>
<!-- Support for Spring errors holder -->
<spring:bind path="orderForm.*">
<c:forEach var="error" items="${status.errorMessages}">
<B><FONT color=RED>
<BR><c:out value="${error}"/>
</FONT></B>
</c:forEach>
</spring:bind>
<form action="<c:url value="/shop/newOrder.do"/>" method="post">
<TABLE bgcolor="#008800" border=0 cellpadding=3 cellspacing=1 bgcolor="#FFFF88">
<TR bgcolor="#FFFF88"><TD colspan=2>
<FONT color=GREEN size=4><B>Payment Details</B></FONT>
</TD></TR><TR bgcolor="#FFFF88"><TD>
Card Type:</TD><TD>
<spring:bind path="orderForm.order.cardType">
<select name="<c:out value="${status.expression}"/>">
<c:forEach var="cardType" items="${creditCardTypes}">
<option <c:if test="${cardType == status.value}">selected</c:if> value="<c:out value="${cardType}"/>">
<c:out value="${cardType}"/></option>
</c:forEach>
</select>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Card Number:</TD><TD>
<spring:bind path="orderForm.order.creditCard">
<input type="text" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
<font color=red size=2>* Use a fake number!</font>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Expiry Date (MM/YYYY):</TD><TD>
<spring:bind path="orderForm.order.expiryDate">
<input type="text" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD colspan=2>
<FONT color=GREEN size=4><B>Billing Address</B></FONT>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
First name:</TD><TD>
<spring:bind path="orderForm.order.billToFirstName">
<input type="text" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Last name:</TD><TD>
<spring:bind path="orderForm.order.billToLastName">
<input type="text" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Address 1:</TD><TD>
<spring:bind path="orderForm.order.billAddress1">
<input type="text" size="40" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Address 2:</TD><TD>
<spring:bind path="orderForm.order.billAddress2">
<input type="text" size="40" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
City: </TD><TD>
<spring:bind path="orderForm.order.billCity">
<input type="text" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
State:</TD><TD>
<spring:bind path="orderForm.order.billState">
<input type="text" size="4" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Zip:</TD><TD>
<spring:bind path="orderForm.order.billZip">
<input type="text" size="10" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Country:</TD><TD>
<spring:bind path="orderForm.order.billCountry">
<input type="text" size="15" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD colspan=2>
<spring:bind path="orderForm.shippingAddressRequired">
<input type="checkbox" name="<c:out value="${status.expression}"/>" value="true" <c:if test="${status.value}">checked</c:if>>
Ship to different address...
</spring:bind>
</TD></TR>
</TABLE>
<P>
<input type="image" src="../images/button_submit.gif">
</form>
<%@ include file="IncludeBottom.jsp" %>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/spring/NewOrderForm.jsp | Java Server Pages | art | 4,103 |
<center>
<a href="<c:url value="/shop/viewCategory.do?categoryId=FISH"/>">
<img border="0" src="../images/sm_fish.gif" /></a>
<img border="0" src="../images/separator.gif" />
<a href="<c:url value="/shop/viewCategory.do?categoryId=DOGS"/>">
<img border="0" src="../images/sm_dogs.gif" /></a>
<img border="0" src="../images/separator.gif" />
<a href="<c:url value="/shop/viewCategory.do?categoryId=REPTILES"/>">
<img border="0" src="../images/sm_reptiles.gif" /></a>
<img border="0" src="../images/separator.gif" />
<a href="<c:url value="/shop/viewCategory.do?categoryId=CATS"/>">
<img border="0" src="../images/sm_cats.gif" /></a>
<img border="0" src="../images/separator.gif" />
<a href="<c:url value="/shop/viewCategory.do?categoryId=BIRDS"/>">
<img border="0" src="../images/sm_birds.gif" /></a>
</center>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/spring/IncludeQuickHeader.jsp | Java Server Pages | art | 826 |
<c:if test="${!empty userSession.myList}">
<p> </p>
<table align="right" bgcolor="#008800" border="0" cellspacing="2" cellpadding="3">
<tr bgcolor="#CCCCCC"><td>
<font size="4"><b>Pet Favorites</b></font>
<font size="2"><i><br />Shop for more of your <br />favorite pets here.</i></font>
</td></tr>
<tr bgcolor="#FFFF88">
<td>
<c:forEach var="product" items="${userSession.myList.pageList}" >
<a href="<c:url value="/shop/viewProduct.do"><c:param name="productId" value="${product.productId}"/></c:url>">
<c:out value="${product.name}"/>
</a>
<br/>
<font size="2">(<c:out value="${product.productId}"/>)</font>
<br/>
</c:forEach>
</td>
</tr>
<tr>
<td>
<c:if test="${!userSession.myList.firstPage}">
<a href="viewCart.do?page=previous"><font color="white"><B><< Prev</B></font></a>
</c:if>
<c:if test="${!userSession.myList.lastPage}">
<a href="viewCart.do?page=next"><font color="white"><B>Next >></B></font></a>
</c:if>
</td>
</tr>
</table>
</c:if>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/spring/IncludeMyList.jsp | Java Server Pages | art | 1,068 |
<%@ include file="IncludeTop.jsp" %>
<table align="left" bgcolor="#008800" border="0" cellspacing="2" cellpadding="2">
<tr><td bgcolor="#FFFF88">
<a href="<c:url value="/shop/index.do"/>"><b><font color="BLACK" size="2"><< Main Menu</font></b></a>
</td></tr>
</table>
<table align="center" bgcolor="#008800" border="0" cellspacing="2" cellpadding="3">
<tr bgcolor="#CCCCCC"> <td> </td> <td><b>Product ID</b></td> <td><b>Name</b></td> </tr>
<c:forEach var="product" items="${productList.pageList}">
<tr bgcolor="#FFFF88">
<td><a href="<c:url value="/shop/viewProduct.do"><c:param name="productId" value="${product.productId}"/></c:url>">
<c:out value="${product.description}" escapeXml="false"/></a></td>
<td><b><a href="<c:url value="/shop/viewProduct.do"><c:param name="productId" value="${product.productId}"/></c:url>">
<font color="BLACK"><c:out value="${product.productId}"/></font>
</a></b></td>
<td><c:out value="${product.name}"/></td>
</tr>
</c:forEach>
<tr>
<td>
<c:if test="${!productList.firstPage}">
<a href="?page=previous"><font color="white"><B><< Prev</B></font></a>
</c:if>
<c:if test="${!productList.lastPage}">
<a href="?page=next"><font color="white"><B>Next >></B></font></a>
</c:if>
</td>
</tr>
</table>
<%@ include file="IncludeBottom.jsp" %>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/spring/SearchProducts.jsp | Java Server Pages | art | 1,375 |
<%@ include file="IncludeTop.jsp" %>
<%@ taglib prefix="spring" uri="http://www.springframework.org/tags" %>
<!-- Support for Spring errors holder -->
<spring:bind path="orderForm.*">
<c:forEach var="error" items="${status.errorMessages}">
<B><FONT color=RED>
<BR><c:out value="${error}"/>
</FONT></B>
</c:forEach>
</spring:bind>
<form action="<c:url value="/shop/newOrder.do"/>" method="post">
<TABLE bgcolor="#008800" border=0 cellpadding=3 cellspacing=1 bgcolor="#FFFF88">
<TR bgcolor="#FFFF88"><TD colspan=2>
<FONT color=GREEN size=4><B>Shipping Address</B></FONT>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
First name:</TD><TD>
<spring:bind path="orderForm.order.shipToFirstName">
<input type="text" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Last name:</TD><TD>
<spring:bind path="orderForm.order.shipToLastName">
<input type="text" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Address 1:</TD><TD>
<spring:bind path="orderForm.order.shipAddress1">
<input type="text" size="40" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Address 2:</TD><TD>
<spring:bind path="orderForm.order.shipAddress2">
<input type="text" size="40" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
City: </TD><TD>
<spring:bind path="orderForm.order.shipCity">
<input type="text" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
State:</TD><TD>
<spring:bind path="orderForm.order.shipState">
<input type="text" size="4" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Zip:</TD><TD>
<spring:bind path="orderForm.order.shipZip">
<input type="text" size="10" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Country: </TD><TD>
<spring:bind path="orderForm.order.shipCountry">
<input type="text" size="15" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
</TABLE>
<P>
<input type="image" src="../images/button_submit.gif">
</form>
<%@ include file="IncludeBottom.jsp" %>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/spring/ShippingForm.jsp | Java Server Pages | art | 2,719 |
<%@ include file="IncludeTop.jsp" %>
<center>
<font size="4"><b>My Orders</b></font>
</center>
<table align="center" bgcolor="#008800" border="0" cellspacing="2" cellpadding="3">
<tr bgcolor="#CCCCCC"> <td><b>Order ID</b></td> <td><b>Date</b></td> <td><b>Total Price</b></td> </tr>
<c:forEach var="order" items="${orderList}">
<tr bgcolor="#FFFF88">
<td><b><a href="<c:url value="/shop/viewOrder.do"><c:param name="orderId" value="${order.orderId}"/></c:url>">
<font color="BLACK"><c:out value="${order.orderId}"/></font>
</a></b></td>
<td><fmt:formatDate value="${order.orderDate}" pattern="yyyy/MM/dd hh:mm:ss"/></td>
<td><fmt:formatNumber value="${order.totalPrice}" pattern="$#,##0.00"/></td>
</tr>
</c:forEach>
</table>
<%@ include file="IncludeBottom.jsp" %>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/spring/ListOrders.jsp | Java Server Pages | art | 809 |
<%@ include file="IncludeTop.jsp" %>
<%@ taglib prefix="spring" uri="http://www.springframework.org/tags" %>
<!-- Support for Spring errors object -->
<spring:bind path="accountForm.*">
<c:forEach var="error" items="${status.errorMessages}">
<B><FONT color=RED>
<BR><c:out value="${error}"/>
</FONT></B>
</c:forEach>
</spring:bind>
<c:if test="${accountForm.newAccount}">
<form action="<c:url value="/shop/newAccount.do"/>" method="post">
</c:if>
<c:if test="${!accountForm.newAccount}">
<form action="<c:url value="/shop/editAccount.do"/>" method="post">
</c:if>
<table cellpadding="10" cellspacing="0" align="center" border="1" bgcolor="#dddddd"><tr><td>
<font color="darkgreen"><h3>User Information</h3></font>
<table border="0" cellpadding="3" cellspacing="1" bgcolor="#FFFF88">
<tr bgcolor="#FFFF88"><td>
User ID:</td><td>
<c:if test="${accountForm.newAccount}">
<spring:bind path="accountForm.account.username">
<input type="text" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</c:if>
<c:if test="${!accountForm.newAccount}">
<c:out value="${accountForm.account.username}"/>
</c:if>
</td></tr><tr bgcolor="#FFFF88"><td>
New password:</td><td>
<spring:bind path="accountForm.account.password">
<input type="password" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</td></tr><tr bgcolor="#FFFF88"><td>
Repeat password:</td><td>
<spring:bind path="accountForm.repeatedPassword">
<input type="password" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</td></tr>
</table>
<%@ include file="IncludeAccountFields.jsp" %>
</td></tr></table>
<br /><center>
<input border="0" type="image" src="../images/button_submit.gif" name="submit" value="Save Account Information" />
</center>
</form>
<p>
<center><b><a href="<c:url value="/shop/listOrders.do"/>">My Orders</a></b></center>
<%@ include file="IncludeBottom.jsp" %></p>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/spring/EditAccountForm.jsp | Java Server Pages | art | 2,102 |
<%@ include file="IncludeTop.jsp" %>
<H3>Error!</H3>
<B><c:out value="${message}" default="No further information was provided."/></B>
<%@ include file="IncludeBottom.jsp" %>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/spring/Error.jsp | Java Server Pages | art | 185 |
<%@ include file="IncludeTop.jsp" %>
<c:if test="${!empty message}">
<b><font color="RED"><c:out value="${message}"/></font></b>
</c:if>
<form action="<c:url value="/shop/signon.do"/>" method="POST">
<c:if test="${!empty signonForwardAction}">
<input type="hidden" name="forwardAction" value="<c:url value="${signonForwardAction}"/>"/>
</c:if>
<table align="center" border="0">
<tr>
<td colspan="2">Please enter your username and password.
<br /> </td>
</tr>
<tr>
<td>Username:</td>
<td><input type="text" name="username" value="j2ee" /></td>
</tr>
<tr>
<td>Password:</td>
<td><input type="password" name="password" value="j2ee" /></td>
</tr>
<tr>
<td> </td>
<td><input type="image" border="0" src="../images/button_submit.gif" name="update" /></td>
</tr>
</table>
</form>
<center>
<a href="<c:url value="/shop/newAccount.do"/>">
<img border="0" src="../images/button_register_now.gif" />
</a>
</center>
<%@ include file="IncludeBottom.jsp" %>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/spring/SignonForm.jsp | Java Server Pages | art | 1,005 |
<%@ include file="IncludeTop.jsp" %>
<table align="left" bgcolor="#008800" border="0" cellspacing="2" cellpadding="2">
<tr><td bgcolor="#FFFF88">
<a href="<c:url value="/shop/viewCategory.do"><c:param name="categoryId" value="${product.categoryId}"/></c:url>">
<b><font color="BLACK" size="2"><< <c:out value="${product.name}"/></font></b>
</a>
</td></tr>
</table>
<p>
<center>
<b><font size="4"><c:out value="${product.name}"/></font></b>
</center>
<table align="center" bgcolor="#008800" border="0" cellspacing="2" cellpadding="3">
<tr bgcolor="#CCCCCC"> <td><b>Item ID</b></td> <td><b>Product ID</b></td> <td><b>Description</b></td> <td><b>List Price</b></td> <td> </td> </tr>
<c:forEach var="item" items="${itemList.pageList}">
<tr bgcolor="#FFFF88">
<td><b>
<a href="<c:url value="/shop/viewItem.do"><c:param name="itemId" value="${item.itemId}"/></c:url>">
<c:out value="${item.itemId}"/>
</a></b></td>
<td><c:out value="${item.productId}"/></td>
<td>
<c:out value="${item.attribute1}"/>
<c:out value="${item.attribute2}"/>
<c:out value="${item.attribute3}"/>
<c:out value="${item.attribute4}"/>
<c:out value="${item.attribute5}"/>
<c:out value="${product.name}"/>
</td>
<td><fmt:formatNumber value="${item.listPrice}" pattern="$#,##0.00"/></td>
<td><a href="<c:url value="/shop/addItemToCart.do"><c:param name="workingItemId" value="${item.itemId}"/></c:url>">
<img border="0" src="../images/button_add_to_cart.gif"/>
</a></td>
</tr>
</c:forEach>
<tr><td>
<c:if test="${!itemList.firstPage}">
<a href="?page=previous"><font color="white"><B><< Prev</B></font></a>
</c:if>
<c:if test="${!itemList.lastPage}">
<a href="?page=next"><font color="white"><B>Next >></B></font></a>
</c:if>
</td></tr>
</table>
<%@ include file="IncludeBottom.jsp" %></p>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/spring/Product.jsp | Java Server Pages | art | 1,922 |
<br>
<c:if test="${userSession.account.bannerOption}">
<table align="center" background="../images/bkg-topbar.gif" cellpadding="5" width="100%">
<tr><td>
<center>
<c:out value="${userSession.account.bannerName}" escapeXml="false"/>
</center>
</td></tr>
</table>
</c:if>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/spring/IncludeBanner.jsp | Java Server Pages | art | 300 |
<%@ include file="IncludeTop.jsp" %>
<table align="left" bgcolor="#008800" border="0" cellspacing="2" cellpadding="2">
<tr><td bgcolor="#FFFF88">
<a href="<c:url value="/shop/viewProduct.do"><c:param name="productId" value="${product.productId}"/></c:url>">
<b><font color="BLACK" size="2"><< <c:out value="${product.name}"/></font></b>
</a>
</td></tr>
</table>
<p>
<table align="center" bgcolor="#008800" cellspacing="2" cellpadding="3" border="0" width="60%">
<tr bgcolor="#FFFF88">
<td bgcolor="#FFFFFF">
<c:out value="${product.description}" escapeXml="false"/>
</td>
</tr>
<tr bgcolor="#FFFF88">
<td width="100%" bgcolor="#cccccc">
<b><c:out value="${item.itemId}"/></b>
</td>
</tr><tr bgcolor="#FFFF88">
<td>
<b><font size="4">
<c:out value="${item.attribute1}"/>
<c:out value="${item.attribute2}"/>
<c:out value="${item.attribute3}"/>
<c:out value="${item.attribute4}"/>
<c:out value="${item.attribute5}"/>
<c:out value="${product.name}"/>
</font></b>
</td></tr>
<tr bgcolor="#FFFF88"><td>
<font size="3"><i><c:out value="${product.name}"/></i></font>
</td></tr>
<tr bgcolor="#FFFF88"><td>
<c:if test="${item.quantity <= 0}">
<font color="RED" size="2"><i>Back ordered.</i></font>
</c:if>
<c:if test="${item.quantity > 0}">
<font size="2"><fmt:formatNumber value="${item.quantity}"/> in stock.</font>
</c:if>
</td></tr>
<tr bgcolor="#FFFF88"><td>
<fmt:formatNumber value="${item.listPrice}" pattern="$#,##0.00" />
</td></tr>
<tr bgcolor="#FFFF88"><td>
<a href="<c:url value="/shop/addItemToCart.do"><c:param name="workingItemId" value="${item.itemId}"/></c:url>">
<img border="0" src="../images/button_add_to_cart.gif"/>
</a>
</td></tr>
</table>
<%@ include file="IncludeBottom.jsp" %>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/spring/Item.jsp | Java Server Pages | art | 1,814 |
<%@ include file="IncludeTop.jsp" %>
<table align="left" bgcolor="#008800" border="0" cellspacing="2" cellpadding="2">
<tr><td bgcolor="#FFFF88">
<a href="<c:url value="/shop/index.do"/>"><b><font color="BLACK" size="2"><< Main Menu</font></b></a>
</td></tr>
</table>
<p>
<center>
<b>Please confirm the information below and then press continue...</b>
</center>
<p>
<table width="60%" align="center" border="0" cellpadding="3" cellspacing="1" bgcolor="#FFFF88">
<tr bgcolor="#FFFF88"><td align="center" colspan="2">
<font size="4"><b>Order</b></font>
<br /><font size="3"><b><fmt:formatDate value="${orderForm.order.orderDate}" pattern="yyyy/MM/dd hh:mm:ss" /></b></font>
</td></tr>
<tr bgcolor="#FFFF88"><td colspan="2">
<font color="GREEN" size="4"><b>Billing Address</b></font>
</td></tr>
<tr bgcolor="#FFFF88"><td>
First name:</td><td><c:out value="${orderForm.order.billToFirstName}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Last name:</td><td><c:out value="${orderForm.order.billToLastName}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Address 1:</td><td><c:out value="${orderForm.order.billAddress1}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Address 2:</td><td><c:out value="${orderForm.order.billAddress2}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
City: </td><td><c:out value="${orderForm.order.billCity}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
State:</td><td><c:out value="${orderForm.order.billState}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Zip:</td><td><c:out value="${orderForm.order.billZip}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Country: </td><td><c:out value="${orderForm.order.billCountry}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td colspan="2">
<font color="GREEN" size="4"><b>Shipping Address</b></font>
</td></tr><tr bgcolor="#FFFF88"><td>
First name:</td><td><c:out value="${orderForm.order.shipToFirstName}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Last name:</td><td><c:out value="${orderForm.order.shipToLastName}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Address 1:</td><td><c:out value="${orderForm.order.shipAddress1}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Address 2:</td><td><c:out value="${orderForm.order.shipAddress2}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
City: </td><td><c:out value="${orderForm.order.shipCity}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
State:</td><td><c:out value="${orderForm.order.shipState}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Zip:</td><td><c:out value="${orderForm.order.shipZip}"/>
</td></tr>
<tr bgcolor="#FFFF88"><td>
Country: </td><td><c:out value="${orderForm.order.shipCountry}"/>
</td></tr>
</table>
<p>
<center><a href="<c:url value="/shop/newOrder.do?_finish=true"/>"><img border="0" src="../images/button_continue.gif" /></a></center>
<%@ include file="IncludeBottom.jsp" %>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/spring/ConfirmOrder.jsp | Java Server Pages | art | 2,833 |
<FONT color=darkgreen><H3>Account Information</H3></FONT>
<TABLE bgcolor="#008800" border=0 cellpadding=3 cellspacing=1 bgcolor="#FFFF88">
<TR bgcolor="#FFFF88"><TD>
First name:</TD><TD>
<spring:bind path="accountForm.account.firstName">
<input type="text" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Last name:</TD><TD>
<spring:bind path="accountForm.account.lastName">
<input type="text" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Email:</TD><TD>
<spring:bind path="accountForm.account.email">
<input type="text" size="40" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Phone:</TD><TD>
<spring:bind path="accountForm.account.phone">
<input type="text" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Address 1:</TD><TD>
<spring:bind path="accountForm.account.address1">
<input type="text" size="40" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Address 2:</TD><TD>
<spring:bind path="accountForm.account.address2">
<input type="text" size="40" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
City: </TD><TD>
<spring:bind path="accountForm.account.city">
<input type="text" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
State:</TD><TD>
<spring:bind path="accountForm.account.state">
<input type="text" size="4" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Zip:</TD><TD>
<spring:bind path="accountForm.account.zip">
<input type="text" size="10" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
<TR bgcolor="#FFFF88"><TD>
Country: </TD><TD>
<spring:bind path="accountForm.account.country">
<input type="text" size="15" name="<c:out value="${status.expression}"/>" value="<c:out value="${status.value}"/>"/>
</spring:bind>
</TD></TR>
</TABLE>
<FONT color=darkgreen><H3>Profile Information</H3></FONT>
<TABLE bgcolor="#008800" border=0 cellpadding=3 cellspacing=1 >
<TR bgcolor="#FFFF88"><TD>
Language Preference:</TD><TD>
<spring:bind path="accountForm.account.languagePreference">
<select name="<c:out value="${status.expression}"/>">
<c:forEach var="language" items="${languages}">
<option <c:if test="${language == status.value}">selected</c:if> value="<c:out value="${language}"/>">
<c:out value="${language}"/></option>
</c:forEach>
</select>
</spring:bind>
</TD></TR><TR bgcolor="#FFFF88"><TD>
Favourite Category:</TD><TD>
<spring:bind path="accountForm.account.favouriteCategoryId">
<select name="<c:out value="${status.expression}"/>">
<c:forEach var="category" items="${categories}">
        <option <c:if test="${category.categoryId == status.value}">selected</c:if> value="<c:out value="${category.categoryId}"/>">
<c:out value="${category.name}"/></option>
</c:forEach>
</select>
</spring:bind>
</TD></TR><TR bgcolor="#FFFF88"><TD colspan=2>
<spring:bind path="accountForm.account.listOption">
<input type="checkbox" name="<c:out value="${status.expression}"/>" value="true" <c:if test="${status.value}">checked</c:if>>
Enable MyList
</spring:bind>
</TD></TR><TR bgcolor="#FFFF88"><TD colspan=2>
<spring:bind path="accountForm.account.bannerOption">
<input type="checkbox" name="<c:out value="${status.expression}"/>" value="true" <c:if test="${status.value}">checked</c:if>>
Enable MyBanner
</spring:bind>
</TD></TR>
</TABLE>
| zzh-simple-hr | Zjpetstore/webapp/WEB-INF/jsp/spring/IncludeAccountFields.jsp | Java Server Pages | art | 4,217 |
<HTML><HEAD><TITLE>JPetStore Demo</TITLE>
<META content="text/html; charset=windows-1252" http-equiv=Content-Type>
</HEAD>
<BODY bgColor=white>
<TABLE background="images/bkg-topbar.gif" border=0 cellSpacing=0 cellPadding=5 width="100%">
<TBODY>
<TR>
<TD><A href="shop/index.do"><IMG border=0
src="images/logo-topbar.gif"></A> </TD>
<TD align=right><A href="shop/viewCart.do"><IMG border=0
name=img_cart src="images/cart.gif"></A>
<IMG border=0 src="images/separator.gif">
<A href="shop/signonForm.do" >
<IMG border=0 name=img_signin src="images/sign-in.gif"></A>
<IMG border=0
src="images/separator.gif"> <A
href="help.html"
><IMG border=0
name=img_help src="images/help.gif"></A>
</TD> <TD align=left valign=bottom>
<FORM action="shop/searchProducts.do">
<INPUT name=keyword size=14> <INPUT border=0
src="images/search.gif"
type=image> </FORM>
</TD>
</TR>
</TBODY></TABLE>
<TABLE border=0 cellSpacing=0 height="85%" width="100%">
<TBODY>
<TR>
<TD vAlign=top><!doctype html public "-//w3c//dtd html 4.0 transitional//en">
<CENTER>
<H1>JPetStore Demo</H1>
<H3>By <a href="mailto:clinton.begin@ibatis.com">Clinton Begin</a></H3></CENTER>The JPetStore Demo
is an online pet store. Like most e-stores, you can browse and search the
product catalog, choose items to add to a shopping cart, amend the
shopping cart, and order the items in the shopping cart. You can perform
many of these actions without registering with or logging into the
application. However, before you can order items you must log in (sign in)
to the application. In order to sign in, you must have an account with the
application, which is created when you register (sign up) with the
application.
<P><A
href="#SigningUp">Signing
Up</A> <BR><A
href="#SigningIn">Signing
In</A> <BR><A
href="#Catalog">Working
with the Product Catalog</A> <BR> <A
href="#CatalogBrowsing">Browsing
the Catalog</A> <BR> <A
href="#CatalogSearching">Searching
the Catalog</A> <BR><A
href="#ShoppingCart">Working
with the Shopping Cart</A> <BR> <A
href="#ShoppingCartAdd">Adding
and Removing Items</A> <BR> <A
href="#ShoppingCartUpdate">Updating
the Quantity of an Item</A> <BR> <A
href="#Ordering">Ordering
Items</A> <BR><A
href="#OrderReview">Reviewing
an Order</A> <BR><A
href="#Issues">Known
Issues</A>
<H2><A name=SigningUp></A>Signing Up</H2>To sign up, click the Sign-in
link at the right end of the banner. Next, click the New User link in the
resulting page. Among other information, the signup page requires you to
provide a user identifier and password. This information is used to
identify your account and must be provided when signing in.
<H2><A name=SigningIn></A>Signing In</H2>You sign in to the application by
clicking the Sign-in link at the right end of the banner, filling in the
user identifier and password, and clicking the Submit button.
<P>You will also be redirected to the signin page when you try to place an
order and you have not signed in. Once you have signed in, you can return
to your shopping session by clicking the shopping cart icon at the right
end of the banner. <BR>
<H2><A name=Catalog></A>Working with the Product Catalog</H2>This section
describes how to browse and search the product catalog.
<H4><A name=CatalogBrowsing></A>Browsing the Catalog</H4>The pet store
catalog is organized hierarchically as follows: categories, products,
items.
<P>You list the pets in a category by clicking on the category name in the
left column of the main page, or by clicking on the picture representing
the category.
<P>Once you select a category, the pet store will display a list of
products within a category. Selecting a product displays a list of items
and their prices. Selecting a product item displays a text and visual
description of the item and the number of that item in stock.
      <H4><A name=CatalogSearching></A>Searching the Catalog</H4>You search for
      products by typing the product name in the search field in the middle of
      the banner.
<H2><A name=ShoppingCart></A>Working with the Shopping Cart</H2>
<H4><A name=ShoppingCartAdd></A>Adding and Removing Items</H4>You add an
item to your shopping cart by clicking the Add to Cart button to the right
of an item. This action also displays your shopping cart.
<P>You can remove the item by clicking the Remove button to the left of
the item.
<P>To continue shopping, you select a product category from the list under
the banner.
<H4><A name=ShoppingCartUpdate></A>Updating the Quantity of an
Item</H4>You adjust the quantity of an item by typing the quantity in the
item's Quantity field in the shopping cart and clicking the Update button.
<P>If the quantity of items requested is greater than that in stock, the
In Stock field in the shopping cart will show that the item is
backordered.
<H4><A name=Ordering></A>Ordering Items</H4>You order the items in the
shopping cart by selecting the Proceed to Checkout button. The pet store
will display a read-only list of the shopping cart contents. To proceed
with the checkout, click the Continue button.
<P>If you have not signed in, the application will display the signin
page, where you will need to provide your account name and password.
Otherwise, the application will display a page requesting payment and
shipping information. When you have filled in the required information,
you click the Submit button, and the application will display a read-only
page containing your billing and shipping address. If you need to
change any information, click your browser's Back button and enter the
correct information. To complete the order, you click the Continue button.
<H2><A name=OrderReview></A>Reviewing An Order</H2>The final screen
contains your order information.
<P>The application can be set up to send email confirmation of
orders. This option can only be set when the application is
deployed. See the installation instructions for further information.
<H2><A name=Issues></A>Known Issues</H2>None.</TD></TR>
<TR>
<TD vAlign=bottom></TD></TR>
<TR>
<TD vAlign=bottom>
            <TABLE border=0 cellSpacing=0 width="100%">
<TBODY>
<TR>
<TD align=middle><FONT color=black size=+1>Implementation by <a href="mailto:clinton.begin@ibatis.com">Clinton Begin</a></FONT>
</TD></TR></TBODY></TABLE></TD></TR></TBODY></TABLE>
<P> </P>
<P align="center">
<a href="http://www.ibatis.com"><IMG border=0 align="center" src="images/poweredby.gif"></a>
</P>
</BODY>
</HTML>
| zzh-simple-hr | Zjpetstore/webapp/help.html | HTML | art | 7,344 |
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<html><body>Top-level package.</body></html>
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/package.html | HTML | art | 821 |
package org.apache.lucene;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Lucene's package information, including version. **/
public final class LucenePackage {

  /** Non-instantiable: exposes only the static {@link #get()} accessor. */
  private LucenePackage() {}

  /** Return Lucene's package, including version information. */
  public static Package get() {
    // Delegate to the class loader's package metadata (version comes from
    // the jar manifest when the class is loaded from the release jar).
    final Package pkg = LucenePackage.class.getPackage();
    return pkg;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/LucenePackage.java | Java | art | 1,143 |
package org.apache.lucene.analysis;
import java.io.IOException;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.ArrayUtil;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This class converts alphabetic, numeric, and symbolic Unicode characters
* which are not in the first 127 ASCII characters (the "Basic Latin" Unicode
* block) into their ASCII equivalents, if one exists.
*
* Characters from the following Unicode blocks are converted; however, only
* those characters with reasonable ASCII alternatives are converted:
*
* <ul>
* <li>C1 Controls and Latin-1 Supplement: <a href="http://www.unicode.org/charts/PDF/U0080.pdf">http://www.unicode.org/charts/PDF/U0080.pdf</a>
* <li>Latin Extended-A: <a href="http://www.unicode.org/charts/PDF/U0100.pdf">http://www.unicode.org/charts/PDF/U0100.pdf</a>
* <li>Latin Extended-B: <a href="http://www.unicode.org/charts/PDF/U0180.pdf">http://www.unicode.org/charts/PDF/U0180.pdf</a>
* <li>Latin Extended Additional: <a href="http://www.unicode.org/charts/PDF/U1E00.pdf">http://www.unicode.org/charts/PDF/U1E00.pdf</a>
* <li>Latin Extended-C: <a href="http://www.unicode.org/charts/PDF/U2C60.pdf">http://www.unicode.org/charts/PDF/U2C60.pdf</a>
* <li>Latin Extended-D: <a href="http://www.unicode.org/charts/PDF/UA720.pdf">http://www.unicode.org/charts/PDF/UA720.pdf</a>
* <li>IPA Extensions: <a href="http://www.unicode.org/charts/PDF/U0250.pdf">http://www.unicode.org/charts/PDF/U0250.pdf</a>
* <li>Phonetic Extensions: <a href="http://www.unicode.org/charts/PDF/U1D00.pdf">http://www.unicode.org/charts/PDF/U1D00.pdf</a>
* <li>Phonetic Extensions Supplement: <a href="http://www.unicode.org/charts/PDF/U1D80.pdf">http://www.unicode.org/charts/PDF/U1D80.pdf</a>
* <li>General Punctuation: <a href="http://www.unicode.org/charts/PDF/U2000.pdf">http://www.unicode.org/charts/PDF/U2000.pdf</a>
* <li>Superscripts and Subscripts: <a href="http://www.unicode.org/charts/PDF/U2070.pdf">http://www.unicode.org/charts/PDF/U2070.pdf</a>
* <li>Enclosed Alphanumerics: <a href="http://www.unicode.org/charts/PDF/U2460.pdf">http://www.unicode.org/charts/PDF/U2460.pdf</a>
* <li>Dingbats: <a href="http://www.unicode.org/charts/PDF/U2700.pdf">http://www.unicode.org/charts/PDF/U2700.pdf</a>
* <li>Supplemental Punctuation: <a href="http://www.unicode.org/charts/PDF/U2E00.pdf">http://www.unicode.org/charts/PDF/U2E00.pdf</a>
* <li>Alphabetic Presentation Forms: <a href="http://www.unicode.org/charts/PDF/UFB00.pdf">http://www.unicode.org/charts/PDF/UFB00.pdf</a>
* <li>Halfwidth and Fullwidth Forms: <a href="http://www.unicode.org/charts/PDF/UFF00.pdf">http://www.unicode.org/charts/PDF/UFF00.pdf</a>
* </ul>
*
* See: <a href="http://en.wikipedia.org/wiki/Latin_characters_in_Unicode">http://en.wikipedia.org/wiki/Latin_characters_in_Unicode</a>
*
* The set of character conversions supported by this class is a superset of
* those supported by Lucene's {@link ISOLatin1AccentFilter} which strips
* accents from Latin1 characters. For example, 'à' will be replaced by
* 'a'.
*/
public final class ASCIIFoldingFilter extends TokenFilter {
  /**
   * Creates a filter that folds non-ASCII Latin characters in the terms of
   * {@code input} into their ASCII equivalents.
   *
   * @param input the token stream to wrap
   */
  public ASCIIFoldingFilter(TokenStream input)
  {
    super(input);
    // Obtain the term attribute whose buffer incrementToken() rewrites in place.
    termAtt = addAttribute(TermAttribute.class);
  }

  // Scratch buffer holding the folded output; grown on demand by foldToASCII().
  private char[] output = new char[512];
  // Number of valid characters written into output by the last foldToASCII() call.
  private int outputPos;
  private TermAttribute termAtt;
@Override
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
final char[] buffer = termAtt.termBuffer();
final int length = termAtt.termLength();
// If no characters actually require rewriting then we
// just return token as-is:
for(int i = 0 ; i < length ; ++i) {
final char c = buffer[i];
if (c >= '\u0080')
{
foldToASCII(buffer, length);
termAtt.setTermBuffer(output, 0, outputPos);
break;
}
}
return true;
} else {
return false;
}
}
/**
* Converts characters above ASCII to their ASCII equivalents. For example,
* accents are removed from accented characters.
* @param input The string to fold
* @param length The number of characters in the input string
*/
public void foldToASCII(char[] input, int length)
{
// Worst-case length required:
final int maxSizeNeeded = 4 * length;
if (output.length < maxSizeNeeded) {
output = new char[ArrayUtil.getNextSize(maxSizeNeeded)];
}
outputPos = 0;
for (int pos = 0 ; pos < length ; ++pos) {
final char c = input[pos];
// Quick test: if it's not in range then just keep current character
if (c < '\u0080') {
output[outputPos++] = c;
} else {
switch (c) {
case '\u00C0': // À [LATIN CAPITAL LETTER A WITH GRAVE]
case '\u00C1': // Á [LATIN CAPITAL LETTER A WITH ACUTE]
case '\u00C2': // Â [LATIN CAPITAL LETTER A WITH CIRCUMFLEX]
case '\u00C3': // Ã [LATIN CAPITAL LETTER A WITH TILDE]
case '\u00C4': // Ä [LATIN CAPITAL LETTER A WITH DIAERESIS]
case '\u00C5': // Å [LATIN CAPITAL LETTER A WITH RING ABOVE]
case '\u0100': // Ā [LATIN CAPITAL LETTER A WITH MACRON]
case '\u0102': // Ă [LATIN CAPITAL LETTER A WITH BREVE]
case '\u0104': // Ą [LATIN CAPITAL LETTER A WITH OGONEK]
case '\u018F': // Ə http://en.wikipedia.org/wiki/Schwa [LATIN CAPITAL LETTER SCHWA]
case '\u01CD': // Ǎ [LATIN CAPITAL LETTER A WITH CARON]
case '\u01DE': // Ǟ [LATIN CAPITAL LETTER A WITH DIAERESIS AND MACRON]
case '\u01E0': // Ǡ [LATIN CAPITAL LETTER A WITH DOT ABOVE AND MACRON]
case '\u01FA': // Ǻ [LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE]
case '\u0200': // Ȁ [LATIN CAPITAL LETTER A WITH DOUBLE GRAVE]
case '\u0202': // Ȃ [LATIN CAPITAL LETTER A WITH INVERTED BREVE]
case '\u0226': // Ȧ [LATIN CAPITAL LETTER A WITH DOT ABOVE]
case '\u023A': // Ⱥ [LATIN CAPITAL LETTER A WITH STROKE]
case '\u1D00': // ᴀ [LATIN LETTER SMALL CAPITAL A]
case '\u1E00': // Ḁ [LATIN CAPITAL LETTER A WITH RING BELOW]
case '\u1EA0': // Ạ [LATIN CAPITAL LETTER A WITH DOT BELOW]
case '\u1EA2': // Ả [LATIN CAPITAL LETTER A WITH HOOK ABOVE]
case '\u1EA4': // Ấ [LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND ACUTE]
case '\u1EA6': // Ầ [LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND GRAVE]
case '\u1EA8': // Ẩ [LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND HOOK ABOVE]
case '\u1EAA': // Ẫ [LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND TILDE]
case '\u1EAC': // Ậ [LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND DOT BELOW]
case '\u1EAE': // Ắ [LATIN CAPITAL LETTER A WITH BREVE AND ACUTE]
case '\u1EB0': // Ằ [LATIN CAPITAL LETTER A WITH BREVE AND GRAVE]
case '\u1EB2': // Ẳ [LATIN CAPITAL LETTER A WITH BREVE AND HOOK ABOVE]
case '\u1EB4': // Ẵ [LATIN CAPITAL LETTER A WITH BREVE AND TILDE]
case '\u1EB6': // Ặ [LATIN CAPITAL LETTER A WITH BREVE AND DOT BELOW]
case '\u24B6': // Ⓐ [CIRCLED LATIN CAPITAL LETTER A]
case '\uFF21': // A [FULLWIDTH LATIN CAPITAL LETTER A]
output[outputPos++] = 'A';
break;
case '\u00E0': // à [LATIN SMALL LETTER A WITH GRAVE]
case '\u00E1': // á [LATIN SMALL LETTER A WITH ACUTE]
case '\u00E2': // â [LATIN SMALL LETTER A WITH CIRCUMFLEX]
case '\u00E3': // ã [LATIN SMALL LETTER A WITH TILDE]
case '\u00E4': // ä [LATIN SMALL LETTER A WITH DIAERESIS]
case '\u00E5': // å [LATIN SMALL LETTER A WITH RING ABOVE]
case '\u0101': // ā [LATIN SMALL LETTER A WITH MACRON]
case '\u0103': // ă [LATIN SMALL LETTER A WITH BREVE]
case '\u0105': // ą [LATIN SMALL LETTER A WITH OGONEK]
case '\u01CE': // ǎ [LATIN SMALL LETTER A WITH CARON]
case '\u01DF': // ǟ [LATIN SMALL LETTER A WITH DIAERESIS AND MACRON]
case '\u01E1': // ǡ [LATIN SMALL LETTER A WITH DOT ABOVE AND MACRON]
case '\u01FB': // ǻ [LATIN SMALL LETTER A WITH RING ABOVE AND ACUTE]
case '\u0201': // ȁ [LATIN SMALL LETTER A WITH DOUBLE GRAVE]
case '\u0203': // ȃ [LATIN SMALL LETTER A WITH INVERTED BREVE]
case '\u0227': // ȧ [LATIN SMALL LETTER A WITH DOT ABOVE]
case '\u0250': // ɐ [LATIN SMALL LETTER TURNED A]
case '\u0259': // ə [LATIN SMALL LETTER SCHWA]
case '\u025A': // ɚ [LATIN SMALL LETTER SCHWA WITH HOOK]
case '\u1D8F': // ᶏ [LATIN SMALL LETTER A WITH RETROFLEX HOOK]
case '\u1D95': // ᶕ [LATIN SMALL LETTER SCHWA WITH RETROFLEX HOOK]
case '\u1E01': // ạ [LATIN SMALL LETTER A WITH RING BELOW]
case '\u1E9A': // ả [LATIN SMALL LETTER A WITH RIGHT HALF RING]
case '\u1EA1': // ạ [LATIN SMALL LETTER A WITH DOT BELOW]
case '\u1EA3': // ả [LATIN SMALL LETTER A WITH HOOK ABOVE]
case '\u1EA5': // ấ [LATIN SMALL LETTER A WITH CIRCUMFLEX AND ACUTE]
case '\u1EA7': // ầ [LATIN SMALL LETTER A WITH CIRCUMFLEX AND GRAVE]
case '\u1EA9': // ẩ [LATIN SMALL LETTER A WITH CIRCUMFLEX AND HOOK ABOVE]
case '\u1EAB': // ẫ [LATIN SMALL LETTER A WITH CIRCUMFLEX AND TILDE]
case '\u1EAD': // ậ [LATIN SMALL LETTER A WITH CIRCUMFLEX AND DOT BELOW]
case '\u1EAF': // ắ [LATIN SMALL LETTER A WITH BREVE AND ACUTE]
case '\u1EB1': // ằ [LATIN SMALL LETTER A WITH BREVE AND GRAVE]
case '\u1EB3': // ẳ [LATIN SMALL LETTER A WITH BREVE AND HOOK ABOVE]
case '\u1EB5': // ẵ [LATIN SMALL LETTER A WITH BREVE AND TILDE]
case '\u1EB7': // ặ [LATIN SMALL LETTER A WITH BREVE AND DOT BELOW]
case '\u2090': // ₐ [LATIN SUBSCRIPT SMALL LETTER A]
case '\u2094': // ₔ [LATIN SUBSCRIPT SMALL LETTER SCHWA]
case '\u24D0': // ⓐ [CIRCLED LATIN SMALL LETTER A]
case '\u2C65': // ⱥ [LATIN SMALL LETTER A WITH STROKE]
case '\u2C6F': // Ɐ [LATIN CAPITAL LETTER TURNED A]
case '\uFF41': // a [FULLWIDTH LATIN SMALL LETTER A]
output[outputPos++] = 'a';
break;
case '\uA732': // Ꜳ [LATIN CAPITAL LETTER AA]
output[outputPos++] = 'A';
output[outputPos++] = 'A';
break;
case '\u00C6': // Æ [LATIN CAPITAL LETTER AE]
case '\u01E2': // Ǣ [LATIN CAPITAL LETTER AE WITH MACRON]
case '\u01FC': // Ǽ [LATIN CAPITAL LETTER AE WITH ACUTE]
case '\u1D01': // ᴁ [LATIN LETTER SMALL CAPITAL AE]
output[outputPos++] = 'A';
output[outputPos++] = 'E';
break;
case '\uA734': // Ꜵ [LATIN CAPITAL LETTER AO]
output[outputPos++] = 'A';
output[outputPos++] = 'O';
break;
case '\uA736': // Ꜷ [LATIN CAPITAL LETTER AU]
output[outputPos++] = 'A';
output[outputPos++] = 'U';
break;
case '\uA738': // Ꜹ [LATIN CAPITAL LETTER AV]
case '\uA73A': // Ꜻ [LATIN CAPITAL LETTER AV WITH HORIZONTAL BAR]
output[outputPos++] = 'A';
output[outputPos++] = 'V';
break;
case '\uA73C': // Ꜽ [LATIN CAPITAL LETTER AY]
output[outputPos++] = 'A';
output[outputPos++] = 'Y';
break;
case '\u249C': // ⒜ [PARENTHESIZED LATIN SMALL LETTER A]
output[outputPos++] = '(';
output[outputPos++] = 'a';
output[outputPos++] = ')';
break;
case '\uA733': // ꜳ [LATIN SMALL LETTER AA]
output[outputPos++] = 'a';
output[outputPos++] = 'a';
break;
case '\u00E6': // æ [LATIN SMALL LETTER AE]
case '\u01E3': // ǣ [LATIN SMALL LETTER AE WITH MACRON]
case '\u01FD': // ǽ [LATIN SMALL LETTER AE WITH ACUTE]
case '\u1D02': // ᴂ [LATIN SMALL LETTER TURNED AE]
output[outputPos++] = 'a';
output[outputPos++] = 'e';
break;
case '\uA735': // ꜵ [LATIN SMALL LETTER AO]
output[outputPos++] = 'a';
output[outputPos++] = 'o';
break;
case '\uA737': // ꜷ [LATIN SMALL LETTER AU]
output[outputPos++] = 'a';
output[outputPos++] = 'u';
break;
case '\uA739': // ꜹ [LATIN SMALL LETTER AV]
case '\uA73B': // ꜻ [LATIN SMALL LETTER AV WITH HORIZONTAL BAR]
output[outputPos++] = 'a';
output[outputPos++] = 'v';
break;
case '\uA73D': // ꜽ [LATIN SMALL LETTER AY]
output[outputPos++] = 'a';
output[outputPos++] = 'y';
break;
case '\u0181': // Ɓ [LATIN CAPITAL LETTER B WITH HOOK]
case '\u0182': // Ƃ [LATIN CAPITAL LETTER B WITH TOPBAR]
case '\u0243': // Ƀ [LATIN CAPITAL LETTER B WITH STROKE]
case '\u0299': // ʙ [LATIN LETTER SMALL CAPITAL B]
case '\u1D03': // ᴃ [LATIN LETTER SMALL CAPITAL BARRED B]
case '\u1E02': // Ḃ [LATIN CAPITAL LETTER B WITH DOT ABOVE]
case '\u1E04': // Ḅ [LATIN CAPITAL LETTER B WITH DOT BELOW]
case '\u1E06': // Ḇ [LATIN CAPITAL LETTER B WITH LINE BELOW]
case '\u24B7': // Ⓑ [CIRCLED LATIN CAPITAL LETTER B]
case '\uFF22': // B [FULLWIDTH LATIN CAPITAL LETTER B]
output[outputPos++] = 'B';
break;
case '\u0180': // ƀ [LATIN SMALL LETTER B WITH STROKE]
case '\u0183': // ƃ [LATIN SMALL LETTER B WITH TOPBAR]
case '\u0253': // ɓ [LATIN SMALL LETTER B WITH HOOK]
case '\u1D6C': // ᵬ [LATIN SMALL LETTER B WITH MIDDLE TILDE]
case '\u1D80': // ᶀ [LATIN SMALL LETTER B WITH PALATAL HOOK]
case '\u1E03': // ḃ [LATIN SMALL LETTER B WITH DOT ABOVE]
case '\u1E05': // ḅ [LATIN SMALL LETTER B WITH DOT BELOW]
case '\u1E07': // ḇ [LATIN SMALL LETTER B WITH LINE BELOW]
case '\u24D1': // ⓑ [CIRCLED LATIN SMALL LETTER B]
case '\uFF42': // b [FULLWIDTH LATIN SMALL LETTER B]
output[outputPos++] = 'b';
break;
case '\u249D': // ⒝ [PARENTHESIZED LATIN SMALL LETTER B]
output[outputPos++] = '(';
output[outputPos++] = 'b';
output[outputPos++] = ')';
break;
case '\u00C7': // Ç [LATIN CAPITAL LETTER C WITH CEDILLA]
case '\u0106': // Ć [LATIN CAPITAL LETTER C WITH ACUTE]
case '\u0108': // Ĉ [LATIN CAPITAL LETTER C WITH CIRCUMFLEX]
case '\u010A': // Ċ [LATIN CAPITAL LETTER C WITH DOT ABOVE]
case '\u010C': // Č [LATIN CAPITAL LETTER C WITH CARON]
case '\u0187': // Ƈ [LATIN CAPITAL LETTER C WITH HOOK]
case '\u023B': // Ȼ [LATIN CAPITAL LETTER C WITH STROKE]
case '\u0297': // ʗ [LATIN LETTER STRETCHED C]
case '\u1D04': // ᴄ [LATIN LETTER SMALL CAPITAL C]
case '\u1E08': // Ḉ [LATIN CAPITAL LETTER C WITH CEDILLA AND ACUTE]
case '\u24B8': // Ⓒ [CIRCLED LATIN CAPITAL LETTER C]
case '\uFF23': // C [FULLWIDTH LATIN CAPITAL LETTER C]
output[outputPos++] = 'C';
break;
case '\u00E7': // ç [LATIN SMALL LETTER C WITH CEDILLA]
case '\u0107': // ć [LATIN SMALL LETTER C WITH ACUTE]
case '\u0109': // ĉ [LATIN SMALL LETTER C WITH CIRCUMFLEX]
case '\u010B': // ċ [LATIN SMALL LETTER C WITH DOT ABOVE]
case '\u010D': // č [LATIN SMALL LETTER C WITH CARON]
case '\u0188': // ƈ [LATIN SMALL LETTER C WITH HOOK]
case '\u023C': // ȼ [LATIN SMALL LETTER C WITH STROKE]
case '\u0255': // ɕ [LATIN SMALL LETTER C WITH CURL]
case '\u1E09': // ḉ [LATIN SMALL LETTER C WITH CEDILLA AND ACUTE]
case '\u2184': // ↄ [LATIN SMALL LETTER REVERSED C]
case '\u24D2': // ⓒ [CIRCLED LATIN SMALL LETTER C]
case '\uA73E': // Ꜿ [LATIN CAPITAL LETTER REVERSED C WITH DOT]
case '\uA73F': // ꜿ [LATIN SMALL LETTER REVERSED C WITH DOT]
case '\uFF43': // c [FULLWIDTH LATIN SMALL LETTER C]
output[outputPos++] = 'c';
break;
case '\u249E': // ⒞ [PARENTHESIZED LATIN SMALL LETTER C]
output[outputPos++] = '(';
output[outputPos++] = 'c';
output[outputPos++] = ')';
break;
case '\u00D0': // Ð [LATIN CAPITAL LETTER ETH]
case '\u010E': // Ď [LATIN CAPITAL LETTER D WITH CARON]
case '\u0110': // Đ [LATIN CAPITAL LETTER D WITH STROKE]
case '\u0189': // Ɖ [LATIN CAPITAL LETTER AFRICAN D]
case '\u018A': // Ɗ [LATIN CAPITAL LETTER D WITH HOOK]
case '\u018B': // Ƌ [LATIN CAPITAL LETTER D WITH TOPBAR]
case '\u1D05': // ᴅ [LATIN LETTER SMALL CAPITAL D]
case '\u1D06': // ᴆ [LATIN LETTER SMALL CAPITAL ETH]
case '\u1E0A': // Ḋ [LATIN CAPITAL LETTER D WITH DOT ABOVE]
case '\u1E0C': // Ḍ [LATIN CAPITAL LETTER D WITH DOT BELOW]
case '\u1E0E': // Ḏ [LATIN CAPITAL LETTER D WITH LINE BELOW]
case '\u1E10': // Ḑ [LATIN CAPITAL LETTER D WITH CEDILLA]
case '\u1E12': // Ḓ [LATIN CAPITAL LETTER D WITH CIRCUMFLEX BELOW]
case '\u24B9': // Ⓓ [CIRCLED LATIN CAPITAL LETTER D]
case '\uA779': // Ꝺ [LATIN CAPITAL LETTER INSULAR D]
case '\uFF24': // D [FULLWIDTH LATIN CAPITAL LETTER D]
output[outputPos++] = 'D';
break;
case '\u00F0': // ð [LATIN SMALL LETTER ETH]
case '\u010F': // ď [LATIN SMALL LETTER D WITH CARON]
case '\u0111': // đ [LATIN SMALL LETTER D WITH STROKE]
case '\u018C': // ƌ [LATIN SMALL LETTER D WITH TOPBAR]
case '\u0221': // ȡ [LATIN SMALL LETTER D WITH CURL]
case '\u0256': // ɖ [LATIN SMALL LETTER D WITH TAIL]
case '\u0257': // ɗ [LATIN SMALL LETTER D WITH HOOK]
case '\u1D6D': // ᵭ [LATIN SMALL LETTER D WITH MIDDLE TILDE]
case '\u1D81': // ᶁ [LATIN SMALL LETTER D WITH PALATAL HOOK]
case '\u1D91': // ᶑ [LATIN SMALL LETTER D WITH HOOK AND TAIL]
case '\u1E0B': // ḋ [LATIN SMALL LETTER D WITH DOT ABOVE]
case '\u1E0D': // ḍ [LATIN SMALL LETTER D WITH DOT BELOW]
case '\u1E0F': // ḏ [LATIN SMALL LETTER D WITH LINE BELOW]
case '\u1E11': // ḑ [LATIN SMALL LETTER D WITH CEDILLA]
case '\u1E13': // ḓ [LATIN SMALL LETTER D WITH CIRCUMFLEX BELOW]
case '\u24D3': // ⓓ [CIRCLED LATIN SMALL LETTER D]
case '\uA77A': // ꝺ [LATIN SMALL LETTER INSULAR D]
case '\uFF44': // d [FULLWIDTH LATIN SMALL LETTER D]
output[outputPos++] = 'd';
break;
case '\u01C4': // DŽ [LATIN CAPITAL LETTER DZ WITH CARON]
case '\u01F1': // DZ [LATIN CAPITAL LETTER DZ]
output[outputPos++] = 'D';
output[outputPos++] = 'Z';
break;
case '\u01C5': // Dž [LATIN CAPITAL LETTER D WITH SMALL LETTER Z WITH CARON]
case '\u01F2': // Dz [LATIN CAPITAL LETTER D WITH SMALL LETTER Z]
output[outputPos++] = 'D';
output[outputPos++] = 'z';
break;
case '\u249F': // ⒟ [PARENTHESIZED LATIN SMALL LETTER D]
output[outputPos++] = '(';
output[outputPos++] = 'd';
output[outputPos++] = ')';
break;
case '\u0238': // ȸ [LATIN SMALL LETTER DB DIGRAPH]
output[outputPos++] = 'd';
output[outputPos++] = 'b';
break;
case '\u01C6': // dž [LATIN SMALL LETTER DZ WITH CARON]
case '\u01F3': // dz [LATIN SMALL LETTER DZ]
case '\u02A3': // ʣ [LATIN SMALL LETTER DZ DIGRAPH]
case '\u02A5': // ʥ [LATIN SMALL LETTER DZ DIGRAPH WITH CURL]
output[outputPos++] = 'd';
output[outputPos++] = 'z';
break;
case '\u00C8': // È [LATIN CAPITAL LETTER E WITH GRAVE]
case '\u00C9': // É [LATIN CAPITAL LETTER E WITH ACUTE]
case '\u00CA': // Ê [LATIN CAPITAL LETTER E WITH CIRCUMFLEX]
case '\u00CB': // Ë [LATIN CAPITAL LETTER E WITH DIAERESIS]
case '\u0112': // Ē [LATIN CAPITAL LETTER E WITH MACRON]
case '\u0114': // Ĕ [LATIN CAPITAL LETTER E WITH BREVE]
case '\u0116': // Ė [LATIN CAPITAL LETTER E WITH DOT ABOVE]
case '\u0118': // Ę [LATIN CAPITAL LETTER E WITH OGONEK]
case '\u011A': // Ě [LATIN CAPITAL LETTER E WITH CARON]
case '\u018E': // Ǝ [LATIN CAPITAL LETTER REVERSED E]
case '\u0190': // Ɛ [LATIN CAPITAL LETTER OPEN E]
case '\u0204': // Ȅ [LATIN CAPITAL LETTER E WITH DOUBLE GRAVE]
case '\u0206': // Ȇ [LATIN CAPITAL LETTER E WITH INVERTED BREVE]
case '\u0228': // Ȩ [LATIN CAPITAL LETTER E WITH CEDILLA]
case '\u0246': // Ɇ [LATIN CAPITAL LETTER E WITH STROKE]
case '\u1D07': // ᴇ [LATIN LETTER SMALL CAPITAL E]
case '\u1E14': // Ḕ [LATIN CAPITAL LETTER E WITH MACRON AND GRAVE]
case '\u1E16': // Ḗ [LATIN CAPITAL LETTER E WITH MACRON AND ACUTE]
case '\u1E18': // Ḙ [LATIN CAPITAL LETTER E WITH CIRCUMFLEX BELOW]
case '\u1E1A': // Ḛ [LATIN CAPITAL LETTER E WITH TILDE BELOW]
case '\u1E1C': // Ḝ [LATIN CAPITAL LETTER E WITH CEDILLA AND BREVE]
case '\u1EB8': // Ẹ [LATIN CAPITAL LETTER E WITH DOT BELOW]
case '\u1EBA': // Ẻ [LATIN CAPITAL LETTER E WITH HOOK ABOVE]
case '\u1EBC': // Ẽ [LATIN CAPITAL LETTER E WITH TILDE]
case '\u1EBE': // Ế [LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND ACUTE]
case '\u1EC0': // Ề [LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND GRAVE]
case '\u1EC2': // Ể [LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND HOOK ABOVE]
case '\u1EC4': // Ễ [LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND TILDE]
case '\u1EC6': // Ệ [LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND DOT BELOW]
case '\u24BA': // Ⓔ [CIRCLED LATIN CAPITAL LETTER E]
case '\u2C7B': // ⱻ [LATIN LETTER SMALL CAPITAL TURNED E]
case '\uFF25': // E [FULLWIDTH LATIN CAPITAL LETTER E]
output[outputPos++] = 'E';
break;
case '\u00E8': // è [LATIN SMALL LETTER E WITH GRAVE]
case '\u00E9': // é [LATIN SMALL LETTER E WITH ACUTE]
case '\u00EA': // ê [LATIN SMALL LETTER E WITH CIRCUMFLEX]
case '\u00EB': // ë [LATIN SMALL LETTER E WITH DIAERESIS]
case '\u0113': // ē [LATIN SMALL LETTER E WITH MACRON]
case '\u0115': // ĕ [LATIN SMALL LETTER E WITH BREVE]
case '\u0117': // ė [LATIN SMALL LETTER E WITH DOT ABOVE]
case '\u0119': // ę [LATIN SMALL LETTER E WITH OGONEK]
case '\u011B': // ě [LATIN SMALL LETTER E WITH CARON]
case '\u01DD': // ǝ [LATIN SMALL LETTER TURNED E]
case '\u0205': // ȅ [LATIN SMALL LETTER E WITH DOUBLE GRAVE]
case '\u0207': // ȇ [LATIN SMALL LETTER E WITH INVERTED BREVE]
case '\u0229': // ȩ [LATIN SMALL LETTER E WITH CEDILLA]
case '\u0247': // ɇ [LATIN SMALL LETTER E WITH STROKE]
case '\u0258': // ɘ [LATIN SMALL LETTER REVERSED E]
case '\u025B': // ɛ [LATIN SMALL LETTER OPEN E]
case '\u025C': // ɜ [LATIN SMALL LETTER REVERSED OPEN E]
case '\u025D': // ɝ [LATIN SMALL LETTER REVERSED OPEN E WITH HOOK]
case '\u025E': // ɞ [LATIN SMALL LETTER CLOSED REVERSED OPEN E]
case '\u029A': // ʚ [LATIN SMALL LETTER CLOSED OPEN E]
case '\u1D08': // ᴈ [LATIN SMALL LETTER TURNED OPEN E]
case '\u1D92': // ᶒ [LATIN SMALL LETTER E WITH RETROFLEX HOOK]
case '\u1D93': // ᶓ [LATIN SMALL LETTER OPEN E WITH RETROFLEX HOOK]
case '\u1D94': // ᶔ [LATIN SMALL LETTER REVERSED OPEN E WITH RETROFLEX HOOK]
case '\u1E15': // ḕ [LATIN SMALL LETTER E WITH MACRON AND GRAVE]
case '\u1E17': // ḗ [LATIN SMALL LETTER E WITH MACRON AND ACUTE]
case '\u1E19': // ḙ [LATIN SMALL LETTER E WITH CIRCUMFLEX BELOW]
case '\u1E1B': // ḛ [LATIN SMALL LETTER E WITH TILDE BELOW]
case '\u1E1D': // ḝ [LATIN SMALL LETTER E WITH CEDILLA AND BREVE]
case '\u1EB9': // ẹ [LATIN SMALL LETTER E WITH DOT BELOW]
case '\u1EBB': // ẻ [LATIN SMALL LETTER E WITH HOOK ABOVE]
case '\u1EBD': // ẽ [LATIN SMALL LETTER E WITH TILDE]
case '\u1EBF': // ế [LATIN SMALL LETTER E WITH CIRCUMFLEX AND ACUTE]
case '\u1EC1': // ề [LATIN SMALL LETTER E WITH CIRCUMFLEX AND GRAVE]
case '\u1EC3': // ể [LATIN SMALL LETTER E WITH CIRCUMFLEX AND HOOK ABOVE]
case '\u1EC5': // ễ [LATIN SMALL LETTER E WITH CIRCUMFLEX AND TILDE]
case '\u1EC7': // ệ [LATIN SMALL LETTER E WITH CIRCUMFLEX AND DOT BELOW]
case '\u2091': // ₑ [LATIN SUBSCRIPT SMALL LETTER E]
case '\u24D4': // ⓔ [CIRCLED LATIN SMALL LETTER E]
case '\u2C78': // ⱸ [LATIN SMALL LETTER E WITH NOTCH]
case '\uFF45': // e [FULLWIDTH LATIN SMALL LETTER E]
output[outputPos++] = 'e';
break;
case '\u24A0': // ⒠ [PARENTHESIZED LATIN SMALL LETTER E]
output[outputPos++] = '(';
output[outputPos++] = 'e';
output[outputPos++] = ')';
break;
case '\u0191': // Ƒ [LATIN CAPITAL LETTER F WITH HOOK]
case '\u1E1E': // Ḟ [LATIN CAPITAL LETTER F WITH DOT ABOVE]
case '\u24BB': // Ⓕ [CIRCLED LATIN CAPITAL LETTER F]
case '\uA730': // ꜰ [LATIN LETTER SMALL CAPITAL F]
case '\uA77B': // Ꝼ [LATIN CAPITAL LETTER INSULAR F]
case '\uA7FB': // ꟻ [LATIN EPIGRAPHIC LETTER REVERSED F]
case '\uFF26': // F [FULLWIDTH LATIN CAPITAL LETTER F]
output[outputPos++] = 'F';
break;
case '\u0192': // ƒ [LATIN SMALL LETTER F WITH HOOK]
case '\u1D6E': // ᵮ [LATIN SMALL LETTER F WITH MIDDLE TILDE]
case '\u1D82': // ᶂ [LATIN SMALL LETTER F WITH PALATAL HOOK]
case '\u1E1F': // ḟ [LATIN SMALL LETTER F WITH DOT ABOVE]
case '\u1E9B': // ẛ [LATIN SMALL LETTER LONG S WITH DOT ABOVE]
case '\u24D5': // ⓕ [CIRCLED LATIN SMALL LETTER F]
case '\uA77C': // ꝼ [LATIN SMALL LETTER INSULAR F]
case '\uFF46': // f [FULLWIDTH LATIN SMALL LETTER F]
output[outputPos++] = 'f';
break;
case '\u24A1': // ⒡ [PARENTHESIZED LATIN SMALL LETTER F]
output[outputPos++] = '(';
output[outputPos++] = 'f';
output[outputPos++] = ')';
break;
case '\uFB00': // ff [LATIN SMALL LIGATURE FF]
output[outputPos++] = 'f';
output[outputPos++] = 'f';
break;
case '\uFB03': // ffi [LATIN SMALL LIGATURE FFI]
output[outputPos++] = 'f';
output[outputPos++] = 'f';
output[outputPos++] = 'i';
break;
case '\uFB04': // ffl [LATIN SMALL LIGATURE FFL]
output[outputPos++] = 'f';
output[outputPos++] = 'f';
output[outputPos++] = 'l';
break;
case '\uFB01': // fi [LATIN SMALL LIGATURE FI]
output[outputPos++] = 'f';
output[outputPos++] = 'i';
break;
case '\uFB02': // fl [LATIN SMALL LIGATURE FL]
output[outputPos++] = 'f';
output[outputPos++] = 'l';
break;
case '\u011C': // Ĝ [LATIN CAPITAL LETTER G WITH CIRCUMFLEX]
case '\u011E': // Ğ [LATIN CAPITAL LETTER G WITH BREVE]
case '\u0120': // Ġ [LATIN CAPITAL LETTER G WITH DOT ABOVE]
case '\u0122': // Ģ [LATIN CAPITAL LETTER G WITH CEDILLA]
case '\u0193': // Ɠ [LATIN CAPITAL LETTER G WITH HOOK]
case '\u01E4': // Ǥ [LATIN CAPITAL LETTER G WITH STROKE]
case '\u01E5': // ǥ [LATIN SMALL LETTER G WITH STROKE]
case '\u01E6': // Ǧ [LATIN CAPITAL LETTER G WITH CARON]
case '\u01E7': // ǧ [LATIN SMALL LETTER G WITH CARON]
case '\u01F4': // Ǵ [LATIN CAPITAL LETTER G WITH ACUTE]
case '\u0262': // ɢ [LATIN LETTER SMALL CAPITAL G]
case '\u029B': // ʛ [LATIN LETTER SMALL CAPITAL G WITH HOOK]
case '\u1E20': // Ḡ [LATIN CAPITAL LETTER G WITH MACRON]
case '\u24BC': // Ⓖ [CIRCLED LATIN CAPITAL LETTER G]
case '\uA77D': // Ᵹ [LATIN CAPITAL LETTER INSULAR G]
case '\uA77E': // Ꝿ [LATIN CAPITAL LETTER TURNED INSULAR G]
case '\uFF27': // G [FULLWIDTH LATIN CAPITAL LETTER G]
output[outputPos++] = 'G';
break;
case '\u011D': // ĝ [LATIN SMALL LETTER G WITH CIRCUMFLEX]
case '\u011F': // ğ [LATIN SMALL LETTER G WITH BREVE]
case '\u0121': // ġ [LATIN SMALL LETTER G WITH DOT ABOVE]
case '\u0123': // ģ [LATIN SMALL LETTER G WITH CEDILLA]
case '\u01F5': // ǵ [LATIN SMALL LETTER G WITH ACUTE]
case '\u0260': // ɠ [LATIN SMALL LETTER G WITH HOOK]
case '\u0261': // ɡ [LATIN SMALL LETTER SCRIPT G]
case '\u1D77': // ᵷ [LATIN SMALL LETTER TURNED G]
case '\u1D79': // ᵹ [LATIN SMALL LETTER INSULAR G]
case '\u1D83': // ᶃ [LATIN SMALL LETTER G WITH PALATAL HOOK]
case '\u1E21': // ḡ [LATIN SMALL LETTER G WITH MACRON]
case '\u24D6': // ⓖ [CIRCLED LATIN SMALL LETTER G]
case '\uA77F': // ꝿ [LATIN SMALL LETTER TURNED INSULAR G]
case '\uFF47': // g [FULLWIDTH LATIN SMALL LETTER G]
output[outputPos++] = 'g';
break;
case '\u24A2': // ⒢ [PARENTHESIZED LATIN SMALL LETTER G]
output[outputPos++] = '(';
output[outputPos++] = 'g';
output[outputPos++] = ')';
break;
case '\u0124': // Ĥ [LATIN CAPITAL LETTER H WITH CIRCUMFLEX]
case '\u0126': // Ħ [LATIN CAPITAL LETTER H WITH STROKE]
case '\u021E': // Ȟ [LATIN CAPITAL LETTER H WITH CARON]
case '\u029C': // ʜ [LATIN LETTER SMALL CAPITAL H]
case '\u1E22': // Ḣ [LATIN CAPITAL LETTER H WITH DOT ABOVE]
case '\u1E24': // Ḥ [LATIN CAPITAL LETTER H WITH DOT BELOW]
case '\u1E26': // Ḧ [LATIN CAPITAL LETTER H WITH DIAERESIS]
case '\u1E28': // Ḩ [LATIN CAPITAL LETTER H WITH CEDILLA]
case '\u1E2A': // Ḫ [LATIN CAPITAL LETTER H WITH BREVE BELOW]
case '\u24BD': // Ⓗ [CIRCLED LATIN CAPITAL LETTER H]
case '\u2C67': // Ⱨ [LATIN CAPITAL LETTER H WITH DESCENDER]
case '\u2C75': // Ⱶ [LATIN CAPITAL LETTER HALF H]
case '\uFF28': // H [FULLWIDTH LATIN CAPITAL LETTER H]
output[outputPos++] = 'H';
break;
case '\u0125': // ĥ [LATIN SMALL LETTER H WITH CIRCUMFLEX]
case '\u0127': // ħ [LATIN SMALL LETTER H WITH STROKE]
case '\u021F': // ȟ [LATIN SMALL LETTER H WITH CARON]
case '\u0265': // ɥ [LATIN SMALL LETTER TURNED H]
case '\u0266': // ɦ [LATIN SMALL LETTER H WITH HOOK]
case '\u02AE': // ʮ [LATIN SMALL LETTER TURNED H WITH FISHHOOK]
case '\u02AF': // ʯ [LATIN SMALL LETTER TURNED H WITH FISHHOOK AND TAIL]
case '\u1E23': // ḣ [LATIN SMALL LETTER H WITH DOT ABOVE]
case '\u1E25': // ḥ [LATIN SMALL LETTER H WITH DOT BELOW]
case '\u1E27': // ḧ [LATIN SMALL LETTER H WITH DIAERESIS]
case '\u1E29': // ḩ [LATIN SMALL LETTER H WITH CEDILLA]
case '\u1E2B': // ḫ [LATIN SMALL LETTER H WITH BREVE BELOW]
case '\u1E96': // ẖ [LATIN SMALL LETTER H WITH LINE BELOW]
case '\u24D7': // ⓗ [CIRCLED LATIN SMALL LETTER H]
case '\u2C68': // ⱨ [LATIN SMALL LETTER H WITH DESCENDER]
case '\u2C76': // ⱶ [LATIN SMALL LETTER HALF H]
case '\uFF48': // h [FULLWIDTH LATIN SMALL LETTER H]
output[outputPos++] = 'h';
break;
case '\u01F6': // Ƕ http://en.wikipedia.org/wiki/Hwair [LATIN CAPITAL LETTER HWAIR]
output[outputPos++] = 'H';
output[outputPos++] = 'V';
break;
case '\u24A3': // ⒣ [PARENTHESIZED LATIN SMALL LETTER H]
output[outputPos++] = '(';
output[outputPos++] = 'h';
output[outputPos++] = ')';
break;
case '\u0195': // ƕ [LATIN SMALL LETTER HV]
output[outputPos++] = 'h';
output[outputPos++] = 'v';
break;
case '\u00CC': // Ì [LATIN CAPITAL LETTER I WITH GRAVE]
case '\u00CD': // Í [LATIN CAPITAL LETTER I WITH ACUTE]
case '\u00CE': // Î [LATIN CAPITAL LETTER I WITH CIRCUMFLEX]
case '\u00CF': // Ï [LATIN CAPITAL LETTER I WITH DIAERESIS]
case '\u0128': // Ĩ [LATIN CAPITAL LETTER I WITH TILDE]
case '\u012A': // Ī [LATIN CAPITAL LETTER I WITH MACRON]
case '\u012C': // Ĭ [LATIN CAPITAL LETTER I WITH BREVE]
case '\u012E': // Į [LATIN CAPITAL LETTER I WITH OGONEK]
case '\u0130': // İ [LATIN CAPITAL LETTER I WITH DOT ABOVE]
case '\u0196': // Ɩ [LATIN CAPITAL LETTER IOTA]
case '\u0197': // Ɨ [LATIN CAPITAL LETTER I WITH STROKE]
case '\u01CF': // Ǐ [LATIN CAPITAL LETTER I WITH CARON]
case '\u0208': // Ȉ [LATIN CAPITAL LETTER I WITH DOUBLE GRAVE]
case '\u020A': // Ȋ [LATIN CAPITAL LETTER I WITH INVERTED BREVE]
case '\u026A': // ɪ [LATIN LETTER SMALL CAPITAL I]
case '\u1D7B': // ᵻ [LATIN SMALL CAPITAL LETTER I WITH STROKE]
case '\u1E2C': // Ḭ [LATIN CAPITAL LETTER I WITH TILDE BELOW]
case '\u1E2E': // Ḯ [LATIN CAPITAL LETTER I WITH DIAERESIS AND ACUTE]
case '\u1EC8': // Ỉ [LATIN CAPITAL LETTER I WITH HOOK ABOVE]
case '\u1ECA': // Ị [LATIN CAPITAL LETTER I WITH DOT BELOW]
case '\u24BE': // Ⓘ [CIRCLED LATIN CAPITAL LETTER I]
case '\uA7FE': // ꟾ [LATIN EPIGRAPHIC LETTER I LONGA]
case '\uFF29': // I [FULLWIDTH LATIN CAPITAL LETTER I]
output[outputPos++] = 'I';
break;
case '\u00EC': // ì [LATIN SMALL LETTER I WITH GRAVE]
case '\u00ED': // í [LATIN SMALL LETTER I WITH ACUTE]
case '\u00EE': // î [LATIN SMALL LETTER I WITH CIRCUMFLEX]
case '\u00EF': // ï [LATIN SMALL LETTER I WITH DIAERESIS]
case '\u0129': // ĩ [LATIN SMALL LETTER I WITH TILDE]
case '\u012B': // ī [LATIN SMALL LETTER I WITH MACRON]
case '\u012D': // ĭ [LATIN SMALL LETTER I WITH BREVE]
case '\u012F': // į [LATIN SMALL LETTER I WITH OGONEK]
case '\u0131': // ı [LATIN SMALL LETTER DOTLESS I]
case '\u01D0': // ǐ [LATIN SMALL LETTER I WITH CARON]
case '\u0209': // ȉ [LATIN SMALL LETTER I WITH DOUBLE GRAVE]
case '\u020B': // ȋ [LATIN SMALL LETTER I WITH INVERTED BREVE]
case '\u0268': // ɨ [LATIN SMALL LETTER I WITH STROKE]
case '\u1D09': // ᴉ [LATIN SMALL LETTER TURNED I]
case '\u1D62': // ᵢ [LATIN SUBSCRIPT SMALL LETTER I]
case '\u1D7C': // ᵼ [LATIN SMALL LETTER IOTA WITH STROKE]
case '\u1D96': // ᶖ [LATIN SMALL LETTER I WITH RETROFLEX HOOK]
case '\u1E2D': // ḭ [LATIN SMALL LETTER I WITH TILDE BELOW]
case '\u1E2F': // ḯ [LATIN SMALL LETTER I WITH DIAERESIS AND ACUTE]
case '\u1EC9': // ỉ [LATIN SMALL LETTER I WITH HOOK ABOVE]
case '\u1ECB': // ị [LATIN SMALL LETTER I WITH DOT BELOW]
case '\u2071': // ⁱ [SUPERSCRIPT LATIN SMALL LETTER I]
case '\u24D8': // ⓘ [CIRCLED LATIN SMALL LETTER I]
case '\uFF49': // i [FULLWIDTH LATIN SMALL LETTER I]
output[outputPos++] = 'i';
break;
case '\u0132': // IJ [LATIN CAPITAL LIGATURE IJ]
output[outputPos++] = 'I';
output[outputPos++] = 'J';
break;
case '\u24A4': // ⒤ [PARENTHESIZED LATIN SMALL LETTER I]
output[outputPos++] = '(';
output[outputPos++] = 'i';
output[outputPos++] = ')';
break;
case '\u0133': // ij [LATIN SMALL LIGATURE IJ]
output[outputPos++] = 'i';
output[outputPos++] = 'j';
break;
case '\u0134': // Ĵ [LATIN CAPITAL LETTER J WITH CIRCUMFLEX]
case '\u0248': // Ɉ [LATIN CAPITAL LETTER J WITH STROKE]
case '\u1D0A': // ᴊ [LATIN LETTER SMALL CAPITAL J]
case '\u24BF': // Ⓙ [CIRCLED LATIN CAPITAL LETTER J]
case '\uFF2A': // J [FULLWIDTH LATIN CAPITAL LETTER J]
output[outputPos++] = 'J';
break;
case '\u0135': // ĵ [LATIN SMALL LETTER J WITH CIRCUMFLEX]
case '\u01F0': // ǰ [LATIN SMALL LETTER J WITH CARON]
case '\u0237': // ȷ [LATIN SMALL LETTER DOTLESS J]
case '\u0249': // ɉ [LATIN SMALL LETTER J WITH STROKE]
case '\u025F': // ɟ [LATIN SMALL LETTER DOTLESS J WITH STROKE]
case '\u0284': // ʄ [LATIN SMALL LETTER DOTLESS J WITH STROKE AND HOOK]
case '\u029D': // ʝ [LATIN SMALL LETTER J WITH CROSSED-TAIL]
case '\u24D9': // ⓙ [CIRCLED LATIN SMALL LETTER J]
case '\u2C7C': // ⱼ [LATIN SUBSCRIPT SMALL LETTER J]
case '\uFF4A': // j [FULLWIDTH LATIN SMALL LETTER J]
output[outputPos++] = 'j';
break;
case '\u24A5': // ⒥ [PARENTHESIZED LATIN SMALL LETTER J]
output[outputPos++] = '(';
output[outputPos++] = 'j';
output[outputPos++] = ')';
break;
case '\u0136': // Ķ [LATIN CAPITAL LETTER K WITH CEDILLA]
case '\u0198': // Ƙ [LATIN CAPITAL LETTER K WITH HOOK]
case '\u01E8': // Ǩ [LATIN CAPITAL LETTER K WITH CARON]
case '\u1D0B': // ᴋ [LATIN LETTER SMALL CAPITAL K]
case '\u1E30': // Ḱ [LATIN CAPITAL LETTER K WITH ACUTE]
case '\u1E32': // Ḳ [LATIN CAPITAL LETTER K WITH DOT BELOW]
case '\u1E34': // Ḵ [LATIN CAPITAL LETTER K WITH LINE BELOW]
case '\u24C0': // Ⓚ [CIRCLED LATIN CAPITAL LETTER K]
case '\u2C69': // Ⱪ [LATIN CAPITAL LETTER K WITH DESCENDER]
case '\uA740': // Ꝁ [LATIN CAPITAL LETTER K WITH STROKE]
case '\uA742': // Ꝃ [LATIN CAPITAL LETTER K WITH DIAGONAL STROKE]
case '\uA744': // Ꝅ [LATIN CAPITAL LETTER K WITH STROKE AND DIAGONAL STROKE]
case '\uFF2B': // K [FULLWIDTH LATIN CAPITAL LETTER K]
output[outputPos++] = 'K';
break;
case '\u0137': // ķ [LATIN SMALL LETTER K WITH CEDILLA]
case '\u0199': // ƙ [LATIN SMALL LETTER K WITH HOOK]
case '\u01E9': // ǩ [LATIN SMALL LETTER K WITH CARON]
case '\u029E': // ʞ [LATIN SMALL LETTER TURNED K]
case '\u1D84': // ᶄ [LATIN SMALL LETTER K WITH PALATAL HOOK]
case '\u1E31': // ḱ [LATIN SMALL LETTER K WITH ACUTE]
case '\u1E33': // ḳ [LATIN SMALL LETTER K WITH DOT BELOW]
case '\u1E35': // ḵ [LATIN SMALL LETTER K WITH LINE BELOW]
case '\u24DA': // ⓚ [CIRCLED LATIN SMALL LETTER K]
case '\u2C6A': // ⱪ [LATIN SMALL LETTER K WITH DESCENDER]
case '\uA741': // ꝁ [LATIN SMALL LETTER K WITH STROKE]
case '\uA743': // ꝃ [LATIN SMALL LETTER K WITH DIAGONAL STROKE]
case '\uA745': // ꝅ [LATIN SMALL LETTER K WITH STROKE AND DIAGONAL STROKE]
case '\uFF4B': // k [FULLWIDTH LATIN SMALL LETTER K]
output[outputPos++] = 'k';
break;
case '\u24A6': // ⒦ [PARENTHESIZED LATIN SMALL LETTER K]
output[outputPos++] = '(';
output[outputPos++] = 'k';
output[outputPos++] = ')';
break;
case '\u0139': // Ĺ [LATIN CAPITAL LETTER L WITH ACUTE]
case '\u013B': // Ļ [LATIN CAPITAL LETTER L WITH CEDILLA]
case '\u013D': // Ľ [LATIN CAPITAL LETTER L WITH CARON]
case '\u013F': // Ŀ [LATIN CAPITAL LETTER L WITH MIDDLE DOT]
case '\u0141': // Ł [LATIN CAPITAL LETTER L WITH STROKE]
case '\u023D': // Ƚ [LATIN CAPITAL LETTER L WITH BAR]
case '\u029F': // ʟ [LATIN LETTER SMALL CAPITAL L]
case '\u1D0C': // ᴌ [LATIN LETTER SMALL CAPITAL L WITH STROKE]
case '\u1E36': // Ḷ [LATIN CAPITAL LETTER L WITH DOT BELOW]
case '\u1E38': // Ḹ [LATIN CAPITAL LETTER L WITH DOT BELOW AND MACRON]
case '\u1E3A': // Ḻ [LATIN CAPITAL LETTER L WITH LINE BELOW]
case '\u1E3C': // Ḽ [LATIN CAPITAL LETTER L WITH CIRCUMFLEX BELOW]
case '\u24C1': // Ⓛ [CIRCLED LATIN CAPITAL LETTER L]
case '\u2C60': // Ⱡ [LATIN CAPITAL LETTER L WITH DOUBLE BAR]
case '\u2C62': // Ɫ [LATIN CAPITAL LETTER L WITH MIDDLE TILDE]
case '\uA746': // Ꝇ [LATIN CAPITAL LETTER BROKEN L]
case '\uA748': // Ꝉ [LATIN CAPITAL LETTER L WITH HIGH STROKE]
case '\uA780': // Ꞁ [LATIN CAPITAL LETTER TURNED L]
case '\uFF2C': // L [FULLWIDTH LATIN CAPITAL LETTER L]
output[outputPos++] = 'L';
break;
case '\u013A': // ĺ [LATIN SMALL LETTER L WITH ACUTE]
case '\u013C': // ļ [LATIN SMALL LETTER L WITH CEDILLA]
case '\u013E': // ľ [LATIN SMALL LETTER L WITH CARON]
case '\u0140': // ŀ [LATIN SMALL LETTER L WITH MIDDLE DOT]
case '\u0142': // ł [LATIN SMALL LETTER L WITH STROKE]
case '\u019A': // ƚ [LATIN SMALL LETTER L WITH BAR]
case '\u0234': // ȴ [LATIN SMALL LETTER L WITH CURL]
case '\u026B': // ɫ [LATIN SMALL LETTER L WITH MIDDLE TILDE]
case '\u026C': // ɬ [LATIN SMALL LETTER L WITH BELT]
case '\u026D': // ɭ [LATIN SMALL LETTER L WITH RETROFLEX HOOK]
case '\u1D85': // ᶅ [LATIN SMALL LETTER L WITH PALATAL HOOK]
case '\u1E37': // ḷ [LATIN SMALL LETTER L WITH DOT BELOW]
case '\u1E39': // ḹ [LATIN SMALL LETTER L WITH DOT BELOW AND MACRON]
case '\u1E3B': // ḻ [LATIN SMALL LETTER L WITH LINE BELOW]
case '\u1E3D': // ḽ [LATIN SMALL LETTER L WITH CIRCUMFLEX BELOW]
case '\u24DB': // ⓛ [CIRCLED LATIN SMALL LETTER L]
case '\u2C61': // ⱡ [LATIN SMALL LETTER L WITH DOUBLE BAR]
case '\uA747': // ꝇ [LATIN SMALL LETTER BROKEN L]
case '\uA749': // ꝉ [LATIN SMALL LETTER L WITH HIGH STROKE]
case '\uA781': // ꞁ [LATIN SMALL LETTER TURNED L]
case '\uFF4C': // l [FULLWIDTH LATIN SMALL LETTER L]
output[outputPos++] = 'l';
break;
case '\u01C7': // LJ [LATIN CAPITAL LETTER LJ]
output[outputPos++] = 'L';
output[outputPos++] = 'J';
break;
case '\u1EFA': // Ỻ [LATIN CAPITAL LETTER MIDDLE-WELSH LL]
output[outputPos++] = 'L';
output[outputPos++] = 'L';
break;
case '\u01C8': // Lj [LATIN CAPITAL LETTER L WITH SMALL LETTER J]
output[outputPos++] = 'L';
output[outputPos++] = 'j';
break;
case '\u24A7': // ⒧ [PARENTHESIZED LATIN SMALL LETTER L]
output[outputPos++] = '(';
output[outputPos++] = 'l';
output[outputPos++] = ')';
break;
case '\u01C9': // lj [LATIN SMALL LETTER LJ]
output[outputPos++] = 'l';
output[outputPos++] = 'j';
break;
case '\u1EFB': // ỻ [LATIN SMALL LETTER MIDDLE-WELSH LL]
output[outputPos++] = 'l';
output[outputPos++] = 'l';
break;
case '\u02AA': // ʪ [LATIN SMALL LETTER LS DIGRAPH]
output[outputPos++] = 'l';
output[outputPos++] = 's';
break;
case '\u02AB': // ʫ [LATIN SMALL LETTER LZ DIGRAPH]
output[outputPos++] = 'l';
output[outputPos++] = 'z';
break;
case '\u019C': // Ɯ [LATIN CAPITAL LETTER TURNED M]
case '\u1D0D': // ᴍ [LATIN LETTER SMALL CAPITAL M]
case '\u1E3E': // Ḿ [LATIN CAPITAL LETTER M WITH ACUTE]
case '\u1E40': // Ṁ [LATIN CAPITAL LETTER M WITH DOT ABOVE]
case '\u1E42': // Ṃ [LATIN CAPITAL LETTER M WITH DOT BELOW]
case '\u24C2': // Ⓜ [CIRCLED LATIN CAPITAL LETTER M]
case '\u2C6E': // Ɱ [LATIN CAPITAL LETTER M WITH HOOK]
case '\uA7FD': // ꟽ [LATIN EPIGRAPHIC LETTER INVERTED M]
case '\uA7FF': // ꟿ [LATIN EPIGRAPHIC LETTER ARCHAIC M]
case '\uFF2D': // M [FULLWIDTH LATIN CAPITAL LETTER M]
output[outputPos++] = 'M';
break;
case '\u026F': // ɯ [LATIN SMALL LETTER TURNED M]
case '\u0270': // ɰ [LATIN SMALL LETTER TURNED M WITH LONG LEG]
case '\u0271': // ɱ [LATIN SMALL LETTER M WITH HOOK]
case '\u1D6F': // ᵯ [LATIN SMALL LETTER M WITH MIDDLE TILDE]
case '\u1D86': // ᶆ [LATIN SMALL LETTER M WITH PALATAL HOOK]
case '\u1E3F': // ḿ [LATIN SMALL LETTER M WITH ACUTE]
case '\u1E41': // ṁ [LATIN SMALL LETTER M WITH DOT ABOVE]
case '\u1E43': // ṃ [LATIN SMALL LETTER M WITH DOT BELOW]
case '\u24DC': // ⓜ [CIRCLED LATIN SMALL LETTER M]
case '\uFF4D': // m [FULLWIDTH LATIN SMALL LETTER M]
output[outputPos++] = 'm';
break;
case '\u24A8': // ⒨ [PARENTHESIZED LATIN SMALL LETTER M]
output[outputPos++] = '(';
output[outputPos++] = 'm';
output[outputPos++] = ')';
break;
case '\u00D1': // Ñ [LATIN CAPITAL LETTER N WITH TILDE]
case '\u0143': // Ń [LATIN CAPITAL LETTER N WITH ACUTE]
case '\u0145': // Ņ [LATIN CAPITAL LETTER N WITH CEDILLA]
case '\u0147': // Ň [LATIN CAPITAL LETTER N WITH CARON]
case '\u014A': // Ŋ http://en.wikipedia.org/wiki/Eng_(letter) [LATIN CAPITAL LETTER ENG]
case '\u019D': // Ɲ [LATIN CAPITAL LETTER N WITH LEFT HOOK]
case '\u01F8': // Ǹ [LATIN CAPITAL LETTER N WITH GRAVE]
case '\u0220': // Ƞ [LATIN CAPITAL LETTER N WITH LONG RIGHT LEG]
case '\u0274': // ɴ [LATIN LETTER SMALL CAPITAL N]
case '\u1D0E': // ᴎ [LATIN LETTER SMALL CAPITAL REVERSED N]
case '\u1E44': // Ṅ [LATIN CAPITAL LETTER N WITH DOT ABOVE]
case '\u1E46': // Ṇ [LATIN CAPITAL LETTER N WITH DOT BELOW]
case '\u1E48': // Ṉ [LATIN CAPITAL LETTER N WITH LINE BELOW]
case '\u1E4A': // Ṋ [LATIN CAPITAL LETTER N WITH CIRCUMFLEX BELOW]
case '\u24C3': // Ⓝ [CIRCLED LATIN CAPITAL LETTER N]
case '\uFF2E': // N [FULLWIDTH LATIN CAPITAL LETTER N]
output[outputPos++] = 'N';
break;
case '\u00F1': // ñ [LATIN SMALL LETTER N WITH TILDE]
case '\u0144': // ń [LATIN SMALL LETTER N WITH ACUTE]
case '\u0146': // ņ [LATIN SMALL LETTER N WITH CEDILLA]
case '\u0148': // ň [LATIN SMALL LETTER N WITH CARON]
case '\u0149': // ʼn [LATIN SMALL LETTER N PRECEDED BY APOSTROPHE]
case '\u014B': // ŋ http://en.wikipedia.org/wiki/Eng_(letter) [LATIN SMALL LETTER ENG]
case '\u019E': // ƞ [LATIN SMALL LETTER N WITH LONG RIGHT LEG]
case '\u01F9': // ǹ [LATIN SMALL LETTER N WITH GRAVE]
case '\u0235': // ȵ [LATIN SMALL LETTER N WITH CURL]
case '\u0272': // ɲ [LATIN SMALL LETTER N WITH LEFT HOOK]
case '\u0273': // ɳ [LATIN SMALL LETTER N WITH RETROFLEX HOOK]
case '\u1D70': // ᵰ [LATIN SMALL LETTER N WITH MIDDLE TILDE]
case '\u1D87': // ᶇ [LATIN SMALL LETTER N WITH PALATAL HOOK]
case '\u1E45': // ṅ [LATIN SMALL LETTER N WITH DOT ABOVE]
case '\u1E47': // ṇ [LATIN SMALL LETTER N WITH DOT BELOW]
case '\u1E49': // ṉ [LATIN SMALL LETTER N WITH LINE BELOW]
case '\u1E4B': // ṋ [LATIN SMALL LETTER N WITH CIRCUMFLEX BELOW]
case '\u207F': // ⁿ [SUPERSCRIPT LATIN SMALL LETTER N]
case '\u24DD': // ⓝ [CIRCLED LATIN SMALL LETTER N]
case '\uFF4E': // n [FULLWIDTH LATIN SMALL LETTER N]
output[outputPos++] = 'n';
break;
case '\u01CA': // NJ [LATIN CAPITAL LETTER NJ]
output[outputPos++] = 'N';
output[outputPos++] = 'J';
break;
case '\u01CB': // Nj [LATIN CAPITAL LETTER N WITH SMALL LETTER J]
output[outputPos++] = 'N';
output[outputPos++] = 'j';
break;
case '\u24A9': // ⒩ [PARENTHESIZED LATIN SMALL LETTER N]
output[outputPos++] = '(';
output[outputPos++] = 'n';
output[outputPos++] = ')';
break;
case '\u01CC': // nj [LATIN SMALL LETTER NJ]
output[outputPos++] = 'n';
output[outputPos++] = 'j';
break;
case '\u00D2': // Ò [LATIN CAPITAL LETTER O WITH GRAVE]
case '\u00D3': // Ó [LATIN CAPITAL LETTER O WITH ACUTE]
case '\u00D4': // Ô [LATIN CAPITAL LETTER O WITH CIRCUMFLEX]
case '\u00D5': // Õ [LATIN CAPITAL LETTER O WITH TILDE]
case '\u00D6': // Ö [LATIN CAPITAL LETTER O WITH DIAERESIS]
case '\u00D8': // Ø [LATIN CAPITAL LETTER O WITH STROKE]
case '\u014C': // Ō [LATIN CAPITAL LETTER O WITH MACRON]
case '\u014E': // Ŏ [LATIN CAPITAL LETTER O WITH BREVE]
case '\u0150': // Ő [LATIN CAPITAL LETTER O WITH DOUBLE ACUTE]
case '\u0186': // Ɔ [LATIN CAPITAL LETTER OPEN O]
case '\u019F': // Ɵ [LATIN CAPITAL LETTER O WITH MIDDLE TILDE]
case '\u01A0': // Ơ [LATIN CAPITAL LETTER O WITH HORN]
case '\u01D1': // Ǒ [LATIN CAPITAL LETTER O WITH CARON]
case '\u01EA': // Ǫ [LATIN CAPITAL LETTER O WITH OGONEK]
case '\u01EC': // Ǭ [LATIN CAPITAL LETTER O WITH OGONEK AND MACRON]
case '\u01FE': // Ǿ [LATIN CAPITAL LETTER O WITH STROKE AND ACUTE]
case '\u020C': // Ȍ [LATIN CAPITAL LETTER O WITH DOUBLE GRAVE]
case '\u020E': // Ȏ [LATIN CAPITAL LETTER O WITH INVERTED BREVE]
case '\u022A': // Ȫ [LATIN CAPITAL LETTER O WITH DIAERESIS AND MACRON]
case '\u022C': // Ȭ [LATIN CAPITAL LETTER O WITH TILDE AND MACRON]
case '\u022E': // Ȯ [LATIN CAPITAL LETTER O WITH DOT ABOVE]
case '\u0230': // Ȱ [LATIN CAPITAL LETTER O WITH DOT ABOVE AND MACRON]
case '\u1D0F': // ᴏ [LATIN LETTER SMALL CAPITAL O]
case '\u1D10': // ᴐ [LATIN LETTER SMALL CAPITAL OPEN O]
case '\u1E4C': // Ṍ [LATIN CAPITAL LETTER O WITH TILDE AND ACUTE]
case '\u1E4E': // Ṏ [LATIN CAPITAL LETTER O WITH TILDE AND DIAERESIS]
case '\u1E50': // Ṑ [LATIN CAPITAL LETTER O WITH MACRON AND GRAVE]
case '\u1E52': // Ṓ [LATIN CAPITAL LETTER O WITH MACRON AND ACUTE]
case '\u1ECC': // Ọ [LATIN CAPITAL LETTER O WITH DOT BELOW]
case '\u1ECE': // Ỏ [LATIN CAPITAL LETTER O WITH HOOK ABOVE]
case '\u1ED0': // Ố [LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND ACUTE]
case '\u1ED2': // Ồ [LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND GRAVE]
case '\u1ED4': // Ổ [LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND HOOK ABOVE]
case '\u1ED6': // Ỗ [LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND TILDE]
case '\u1ED8': // Ộ [LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND DOT BELOW]
case '\u1EDA': // Ớ [LATIN CAPITAL LETTER O WITH HORN AND ACUTE]
case '\u1EDC': // Ờ [LATIN CAPITAL LETTER O WITH HORN AND GRAVE]
case '\u1EDE': // Ở [LATIN CAPITAL LETTER O WITH HORN AND HOOK ABOVE]
case '\u1EE0': // Ỡ [LATIN CAPITAL LETTER O WITH HORN AND TILDE]
case '\u1EE2': // Ợ [LATIN CAPITAL LETTER O WITH HORN AND DOT BELOW]
case '\u24C4': // Ⓞ [CIRCLED LATIN CAPITAL LETTER O]
case '\uA74A': // Ꝋ [LATIN CAPITAL LETTER O WITH LONG STROKE OVERLAY]
case '\uA74C': // Ꝍ [LATIN CAPITAL LETTER O WITH LOOP]
case '\uFF2F': // O [FULLWIDTH LATIN CAPITAL LETTER O]
output[outputPos++] = 'O';
break;
case '\u00F2': // ò [LATIN SMALL LETTER O WITH GRAVE]
case '\u00F3': // ó [LATIN SMALL LETTER O WITH ACUTE]
case '\u00F4': // ô [LATIN SMALL LETTER O WITH CIRCUMFLEX]
case '\u00F5': // õ [LATIN SMALL LETTER O WITH TILDE]
case '\u00F6': // ö [LATIN SMALL LETTER O WITH DIAERESIS]
case '\u00F8': // ø [LATIN SMALL LETTER O WITH STROKE]
case '\u014D': // ō [LATIN SMALL LETTER O WITH MACRON]
case '\u014F': // ŏ [LATIN SMALL LETTER O WITH BREVE]
case '\u0151': // ő [LATIN SMALL LETTER O WITH DOUBLE ACUTE]
case '\u01A1': // ơ [LATIN SMALL LETTER O WITH HORN]
case '\u01D2': // ǒ [LATIN SMALL LETTER O WITH CARON]
case '\u01EB': // ǫ [LATIN SMALL LETTER O WITH OGONEK]
case '\u01ED': // ǭ [LATIN SMALL LETTER O WITH OGONEK AND MACRON]
case '\u01FF': // ǿ [LATIN SMALL LETTER O WITH STROKE AND ACUTE]
case '\u020D': // ȍ [LATIN SMALL LETTER O WITH DOUBLE GRAVE]
case '\u020F': // ȏ [LATIN SMALL LETTER O WITH INVERTED BREVE]
case '\u022B': // ȫ [LATIN SMALL LETTER O WITH DIAERESIS AND MACRON]
case '\u022D': // ȭ [LATIN SMALL LETTER O WITH TILDE AND MACRON]
case '\u022F': // ȯ [LATIN SMALL LETTER O WITH DOT ABOVE]
case '\u0231': // ȱ [LATIN SMALL LETTER O WITH DOT ABOVE AND MACRON]
case '\u0254': // ɔ [LATIN SMALL LETTER OPEN O]
case '\u0275': // ɵ [LATIN SMALL LETTER BARRED O]
case '\u1D16': // ᴖ [LATIN SMALL LETTER TOP HALF O]
case '\u1D17': // ᴗ [LATIN SMALL LETTER BOTTOM HALF O]
case '\u1D97': // ᶗ [LATIN SMALL LETTER OPEN O WITH RETROFLEX HOOK]
case '\u1E4D': // ṍ [LATIN SMALL LETTER O WITH TILDE AND ACUTE]
case '\u1E4F': // ṏ [LATIN SMALL LETTER O WITH TILDE AND DIAERESIS]
case '\u1E51': // ṑ [LATIN SMALL LETTER O WITH MACRON AND GRAVE]
case '\u1E53': // ṓ [LATIN SMALL LETTER O WITH MACRON AND ACUTE]
case '\u1ECD': // ọ [LATIN SMALL LETTER O WITH DOT BELOW]
case '\u1ECF': // ỏ [LATIN SMALL LETTER O WITH HOOK ABOVE]
case '\u1ED1': // ố [LATIN SMALL LETTER O WITH CIRCUMFLEX AND ACUTE]
case '\u1ED3': // ồ [LATIN SMALL LETTER O WITH CIRCUMFLEX AND GRAVE]
case '\u1ED5': // ổ [LATIN SMALL LETTER O WITH CIRCUMFLEX AND HOOK ABOVE]
case '\u1ED7': // ỗ [LATIN SMALL LETTER O WITH CIRCUMFLEX AND TILDE]
case '\u1ED9': // ộ [LATIN SMALL LETTER O WITH CIRCUMFLEX AND DOT BELOW]
case '\u1EDB': // ớ [LATIN SMALL LETTER O WITH HORN AND ACUTE]
case '\u1EDD': // ờ [LATIN SMALL LETTER O WITH HORN AND GRAVE]
case '\u1EDF': // ở [LATIN SMALL LETTER O WITH HORN AND HOOK ABOVE]
case '\u1EE1': // ỡ [LATIN SMALL LETTER O WITH HORN AND TILDE]
case '\u1EE3': // ợ [LATIN SMALL LETTER O WITH HORN AND DOT BELOW]
case '\u2092': // ₒ [LATIN SUBSCRIPT SMALL LETTER O]
case '\u24DE': // ⓞ [CIRCLED LATIN SMALL LETTER O]
case '\u2C7A': // ⱺ [LATIN SMALL LETTER O WITH LOW RING INSIDE]
case '\uA74B': // ꝋ [LATIN SMALL LETTER O WITH LONG STROKE OVERLAY]
case '\uA74D': // ꝍ [LATIN SMALL LETTER O WITH LOOP]
case '\uFF4F': // o [FULLWIDTH LATIN SMALL LETTER O]
output[outputPos++] = 'o';
break;
case '\u0152': // Œ [LATIN CAPITAL LIGATURE OE]
case '\u0276': // ɶ [LATIN LETTER SMALL CAPITAL OE]
output[outputPos++] = 'O';
output[outputPos++] = 'E';
break;
case '\uA74E': // Ꝏ [LATIN CAPITAL LETTER OO]
output[outputPos++] = 'O';
output[outputPos++] = 'O';
break;
case '\u0222': // Ȣ http://en.wikipedia.org/wiki/OU [LATIN CAPITAL LETTER OU]
case '\u1D15': // ᴕ [LATIN LETTER SMALL CAPITAL OU]
output[outputPos++] = 'O';
output[outputPos++] = 'U';
break;
case '\u24AA': // ⒪ [PARENTHESIZED LATIN SMALL LETTER O]
output[outputPos++] = '(';
output[outputPos++] = 'o';
output[outputPos++] = ')';
break;
case '\u0153': // œ [LATIN SMALL LIGATURE OE]
case '\u1D14': // ᴔ [LATIN SMALL LETTER TURNED OE]
output[outputPos++] = 'o';
output[outputPos++] = 'e';
break;
case '\uA74F': // ꝏ [LATIN SMALL LETTER OO]
output[outputPos++] = 'o';
output[outputPos++] = 'o';
break;
case '\u0223': // ȣ http://en.wikipedia.org/wiki/OU [LATIN SMALL LETTER OU]
output[outputPos++] = 'o';
output[outputPos++] = 'u';
break;
case '\u01A4': // Ƥ [LATIN CAPITAL LETTER P WITH HOOK]
case '\u1D18': // ᴘ [LATIN LETTER SMALL CAPITAL P]
case '\u1E54': // Ṕ [LATIN CAPITAL LETTER P WITH ACUTE]
case '\u1E56': // Ṗ [LATIN CAPITAL LETTER P WITH DOT ABOVE]
case '\u24C5': // Ⓟ [CIRCLED LATIN CAPITAL LETTER P]
case '\u2C63': // Ᵽ [LATIN CAPITAL LETTER P WITH STROKE]
case '\uA750': // Ꝑ [LATIN CAPITAL LETTER P WITH STROKE THROUGH DESCENDER]
case '\uA752': // Ꝓ [LATIN CAPITAL LETTER P WITH FLOURISH]
case '\uA754': // Ꝕ [LATIN CAPITAL LETTER P WITH SQUIRREL TAIL]
case '\uFF30': // P [FULLWIDTH LATIN CAPITAL LETTER P]
output[outputPos++] = 'P';
break;
case '\u01A5': // ƥ [LATIN SMALL LETTER P WITH HOOK]
case '\u1D71': // ᵱ [LATIN SMALL LETTER P WITH MIDDLE TILDE]
case '\u1D7D': // ᵽ [LATIN SMALL LETTER P WITH STROKE]
case '\u1D88': // ᶈ [LATIN SMALL LETTER P WITH PALATAL HOOK]
case '\u1E55': // ṕ [LATIN SMALL LETTER P WITH ACUTE]
case '\u1E57': // ṗ [LATIN SMALL LETTER P WITH DOT ABOVE]
case '\u24DF': // ⓟ [CIRCLED LATIN SMALL LETTER P]
case '\uA751': // ꝑ [LATIN SMALL LETTER P WITH STROKE THROUGH DESCENDER]
case '\uA753': // ꝓ [LATIN SMALL LETTER P WITH FLOURISH]
case '\uA755': // ꝕ [LATIN SMALL LETTER P WITH SQUIRREL TAIL]
case '\uA7FC': // ꟼ [LATIN EPIGRAPHIC LETTER REVERSED P]
case '\uFF50': // p [FULLWIDTH LATIN SMALL LETTER P]
output[outputPos++] = 'p';
break;
case '\u24AB': // ⒫ [PARENTHESIZED LATIN SMALL LETTER P]
output[outputPos++] = '(';
output[outputPos++] = 'p';
output[outputPos++] = ')';
break;
case '\u024A': // Ɋ [LATIN CAPITAL LETTER SMALL Q WITH HOOK TAIL]
case '\u24C6': // Ⓠ [CIRCLED LATIN CAPITAL LETTER Q]
case '\uA756': // Ꝗ [LATIN CAPITAL LETTER Q WITH STROKE THROUGH DESCENDER]
case '\uA758': // Ꝙ [LATIN CAPITAL LETTER Q WITH DIAGONAL STROKE]
case '\uFF31': // Q [FULLWIDTH LATIN CAPITAL LETTER Q]
output[outputPos++] = 'Q';
break;
case '\u0138': // ĸ http://en.wikipedia.org/wiki/Kra_(letter) [LATIN SMALL LETTER KRA]
case '\u024B': // ɋ [LATIN SMALL LETTER Q WITH HOOK TAIL]
case '\u02A0': // ʠ [LATIN SMALL LETTER Q WITH HOOK]
case '\u24E0': // ⓠ [CIRCLED LATIN SMALL LETTER Q]
case '\uA757': // ꝗ [LATIN SMALL LETTER Q WITH STROKE THROUGH DESCENDER]
case '\uA759': // ꝙ [LATIN SMALL LETTER Q WITH DIAGONAL STROKE]
case '\uFF51': // q [FULLWIDTH LATIN SMALL LETTER Q]
output[outputPos++] = 'q';
break;
case '\u24AC': // ⒬ [PARENTHESIZED LATIN SMALL LETTER Q]
output[outputPos++] = '(';
output[outputPos++] = 'q';
output[outputPos++] = ')';
break;
case '\u0239': // ȹ [LATIN SMALL LETTER QP DIGRAPH]
output[outputPos++] = 'q';
output[outputPos++] = 'p';
break;
// Fold capital-R variants (accented, small-capital, rotunda, insular, fullwidth) to ASCII 'R'.
case '\u0154': // Ŕ [LATIN CAPITAL LETTER R WITH ACUTE]
case '\u0156': // Ŗ [LATIN CAPITAL LETTER R WITH CEDILLA]
case '\u0158': // Ř [LATIN CAPITAL LETTER R WITH CARON]
case '\u0210': // Ȑ [LATIN CAPITAL LETTER R WITH DOUBLE GRAVE]
case '\u0212': // Ȓ [LATIN CAPITAL LETTER R WITH INVERTED BREVE]
case '\u024C': // Ɍ [LATIN CAPITAL LETTER R WITH STROKE]
case '\u0280': // ʀ [LATIN LETTER SMALL CAPITAL R]
case '\u0281': // ʁ [LATIN LETTER SMALL CAPITAL INVERTED R]
case '\u1D19': // ᴙ [LATIN LETTER SMALL CAPITAL REVERSED R]
case '\u1D1A': // ᴚ [LATIN LETTER SMALL CAPITAL TURNED R]
case '\u1E58': // Ṙ [LATIN CAPITAL LETTER R WITH DOT ABOVE]
case '\u1E5A': // Ṛ [LATIN CAPITAL LETTER R WITH DOT BELOW]
case '\u1E5C': // Ṝ [LATIN CAPITAL LETTER R WITH DOT BELOW AND MACRON]
case '\u1E5E': // Ṟ [LATIN CAPITAL LETTER R WITH LINE BELOW]
case '\u24C7': // Ⓡ [CIRCLED LATIN CAPITAL LETTER R]
case '\u2C64': // Ɽ [LATIN CAPITAL LETTER R WITH TAIL]
case '\uA75A': // Ꝛ [LATIN CAPITAL LETTER R ROTUNDA]
case '\uA782': // Ꞃ [LATIN CAPITAL LETTER INSULAR R]
case '\uFF32': // R [FULLWIDTH LATIN CAPITAL LETTER R]
output[outputPos++] = 'R';
break;
case '\u0155': // ŕ [LATIN SMALL LETTER R WITH ACUTE]
case '\u0157': // ŗ [LATIN SMALL LETTER R WITH CEDILLA]
case '\u0159': // ř [LATIN SMALL LETTER R WITH CARON]
case '\u0211': // ȑ [LATIN SMALL LETTER R WITH DOUBLE GRAVE]
case '\u0213': // ȓ [LATIN SMALL LETTER R WITH INVERTED BREVE]
case '\u024D': // ɍ [LATIN SMALL LETTER R WITH STROKE]
case '\u027C': // ɼ [LATIN SMALL LETTER R WITH LONG LEG]
case '\u027D': // ɽ [LATIN SMALL LETTER R WITH TAIL]
case '\u027E': // ɾ [LATIN SMALL LETTER R WITH FISHHOOK]
case '\u027F': // ɿ [LATIN SMALL LETTER REVERSED R WITH FISHHOOK]
case '\u1D63': // ᵣ [LATIN SUBSCRIPT SMALL LETTER R]
case '\u1D72': // ᵲ [LATIN SMALL LETTER R WITH MIDDLE TILDE]
case '\u1D73': // ᵳ [LATIN SMALL LETTER R WITH FISHHOOK AND MIDDLE TILDE]
case '\u1D89': // ᶉ [LATIN SMALL LETTER R WITH PALATAL HOOK]
case '\u1E59': // ṙ [LATIN SMALL LETTER R WITH DOT ABOVE]
case '\u1E5B': // ṛ [LATIN SMALL LETTER R WITH DOT BELOW]
case '\u1E5D': // ṝ [LATIN SMALL LETTER R WITH DOT BELOW AND MACRON]
case '\u1E5F': // ṟ [LATIN SMALL LETTER R WITH LINE BELOW]
case '\u24E1': // ⓡ [CIRCLED LATIN SMALL LETTER R]
case '\uA75B': // ꝛ [LATIN SMALL LETTER R ROTUNDA]
case '\uA783': // ꞃ [LATIN SMALL LETTER INSULAR R]
case '\uFF52': // r [FULLWIDTH LATIN SMALL LETTER R]
output[outputPos++] = 'r';
break;
case '\u24AD': // ⒭ [PARENTHESIZED LATIN SMALL LETTER R]
output[outputPos++] = '(';
output[outputPos++] = 'r';
output[outputPos++] = ')';
break;
// Fold S variants to ASCII. NOTE(review): \uA784 (CAPITAL INSULAR S) and
// \uA785 (SMALL INSULAR S) were previously swapped between the 'S' and 's'
// groups, breaking the table's capital->capital / small->small convention
// (compare insular R: \uA782 -> 'R', \uA783 -> 'r'). They are now placed
// with their matching case.
case '\u015A': // Ś [LATIN CAPITAL LETTER S WITH ACUTE]
case '\u015C': // Ŝ [LATIN CAPITAL LETTER S WITH CIRCUMFLEX]
case '\u015E': // Ş [LATIN CAPITAL LETTER S WITH CEDILLA]
case '\u0160': // Š [LATIN CAPITAL LETTER S WITH CARON]
case '\u0218': // Ș [LATIN CAPITAL LETTER S WITH COMMA BELOW]
case '\u1E60': // Ṡ [LATIN CAPITAL LETTER S WITH DOT ABOVE]
case '\u1E62': // Ṣ [LATIN CAPITAL LETTER S WITH DOT BELOW]
case '\u1E64': // Ṥ [LATIN CAPITAL LETTER S WITH ACUTE AND DOT ABOVE]
case '\u1E66': // Ṧ [LATIN CAPITAL LETTER S WITH CARON AND DOT ABOVE]
case '\u1E68': // Ṩ [LATIN CAPITAL LETTER S WITH DOT BELOW AND DOT ABOVE]
case '\u24C8': // Ⓢ [CIRCLED LATIN CAPITAL LETTER S]
case '\uA731': // ꜱ [LATIN LETTER SMALL CAPITAL S]
case '\uA784': // Ꞅ [LATIN CAPITAL LETTER INSULAR S]
case '\uFF33': // S [FULLWIDTH LATIN CAPITAL LETTER S]
output[outputPos++] = 'S';
break;
case '\u015B': // ś [LATIN SMALL LETTER S WITH ACUTE]
case '\u015D': // ŝ [LATIN SMALL LETTER S WITH CIRCUMFLEX]
case '\u015F': // ş [LATIN SMALL LETTER S WITH CEDILLA]
case '\u0161': // š [LATIN SMALL LETTER S WITH CARON]
case '\u017F': // ſ http://en.wikipedia.org/wiki/Long_S [LATIN SMALL LETTER LONG S]
case '\u0219': // ș [LATIN SMALL LETTER S WITH COMMA BELOW]
case '\u023F': // ȿ [LATIN SMALL LETTER S WITH SWASH TAIL]
case '\u0282': // ʂ [LATIN SMALL LETTER S WITH HOOK]
case '\u1D74': // ᵴ [LATIN SMALL LETTER S WITH MIDDLE TILDE]
case '\u1D8A': // ᶊ [LATIN SMALL LETTER S WITH PALATAL HOOK]
case '\u1E61': // ṡ [LATIN SMALL LETTER S WITH DOT ABOVE]
case '\u1E63': // ṣ [LATIN SMALL LETTER S WITH DOT BELOW]
case '\u1E65': // ṥ [LATIN SMALL LETTER S WITH ACUTE AND DOT ABOVE]
case '\u1E67': // ṧ [LATIN SMALL LETTER S WITH CARON AND DOT ABOVE]
case '\u1E69': // ṩ [LATIN SMALL LETTER S WITH DOT BELOW AND DOT ABOVE]
case '\u1E9C': // ẜ [LATIN SMALL LETTER LONG S WITH DIAGONAL STROKE]
case '\u1E9D': // ẝ [LATIN SMALL LETTER LONG S WITH HIGH STROKE]
case '\u24E2': // ⓢ [CIRCLED LATIN SMALL LETTER S]
case '\uA785': // ꞅ [LATIN SMALL LETTER INSULAR S]
case '\uFF53': // s [FULLWIDTH LATIN SMALL LETTER S]
output[outputPos++] = 's';
break;
case '\u1E9E': // ẞ [LATIN CAPITAL LETTER SHARP S]
output[outputPos++] = 'S';
output[outputPos++] = 'S';
break;
case '\u24AE': // ⒮ [PARENTHESIZED LATIN SMALL LETTER S]
output[outputPos++] = '(';
output[outputPos++] = 's';
output[outputPos++] = ')';
break;
case '\u00DF': // ß [LATIN SMALL LETTER SHARP S]
output[outputPos++] = 's';
output[outputPos++] = 's';
break;
case '\uFB06': // st [LATIN SMALL LIGATURE ST]
output[outputPos++] = 's';
output[outputPos++] = 't';
break;
case '\u0162': // Ţ [LATIN CAPITAL LETTER T WITH CEDILLA]
case '\u0164': // Ť [LATIN CAPITAL LETTER T WITH CARON]
case '\u0166': // Ŧ [LATIN CAPITAL LETTER T WITH STROKE]
case '\u01AC': // Ƭ [LATIN CAPITAL LETTER T WITH HOOK]
case '\u01AE': // Ʈ [LATIN CAPITAL LETTER T WITH RETROFLEX HOOK]
case '\u021A': // Ț [LATIN CAPITAL LETTER T WITH COMMA BELOW]
case '\u023E': // Ⱦ [LATIN CAPITAL LETTER T WITH DIAGONAL STROKE]
case '\u1D1B': // ᴛ [LATIN LETTER SMALL CAPITAL T]
case '\u1E6A': // Ṫ [LATIN CAPITAL LETTER T WITH DOT ABOVE]
case '\u1E6C': // Ṭ [LATIN CAPITAL LETTER T WITH DOT BELOW]
case '\u1E6E': // Ṯ [LATIN CAPITAL LETTER T WITH LINE BELOW]
case '\u1E70': // Ṱ [LATIN CAPITAL LETTER T WITH CIRCUMFLEX BELOW]
case '\u24C9': // Ⓣ [CIRCLED LATIN CAPITAL LETTER T]
case '\uA786': // Ꞇ [LATIN CAPITAL LETTER INSULAR T]
case '\uFF34': // T [FULLWIDTH LATIN CAPITAL LETTER T]
output[outputPos++] = 'T';
break;
case '\u0163': // ţ [LATIN SMALL LETTER T WITH CEDILLA]
case '\u0165': // ť [LATIN SMALL LETTER T WITH CARON]
case '\u0167': // ŧ [LATIN SMALL LETTER T WITH STROKE]
case '\u01AB': // ƫ [LATIN SMALL LETTER T WITH PALATAL HOOK]
case '\u01AD': // ƭ [LATIN SMALL LETTER T WITH HOOK]
case '\u021B': // ț [LATIN SMALL LETTER T WITH COMMA BELOW]
case '\u0236': // ȶ [LATIN SMALL LETTER T WITH CURL]
case '\u0287': // ʇ [LATIN SMALL LETTER TURNED T]
case '\u0288': // ʈ [LATIN SMALL LETTER T WITH RETROFLEX HOOK]
case '\u1D75': // ᵵ [LATIN SMALL LETTER T WITH MIDDLE TILDE]
case '\u1E6B': // ṫ [LATIN SMALL LETTER T WITH DOT ABOVE]
case '\u1E6D': // ṭ [LATIN SMALL LETTER T WITH DOT BELOW]
case '\u1E6F': // ṯ [LATIN SMALL LETTER T WITH LINE BELOW]
case '\u1E71': // ṱ [LATIN SMALL LETTER T WITH CIRCUMFLEX BELOW]
case '\u1E97': // ẗ [LATIN SMALL LETTER T WITH DIAERESIS]
case '\u24E3': // ⓣ [CIRCLED LATIN SMALL LETTER T]
case '\u2C66': // ⱦ [LATIN SMALL LETTER T WITH DIAGONAL STROKE]
case '\uFF54': // t [FULLWIDTH LATIN SMALL LETTER T]
output[outputPos++] = 't';
break;
case '\u00DE': // Þ [LATIN CAPITAL LETTER THORN]
case '\uA766': // Ꝧ [LATIN CAPITAL LETTER THORN WITH STROKE THROUGH DESCENDER]
output[outputPos++] = 'T';
output[outputPos++] = 'H';
break;
case '\uA728': // Ꜩ [LATIN CAPITAL LETTER TZ]
output[outputPos++] = 'T';
output[outputPos++] = 'Z';
break;
case '\u24AF': // ⒯ [PARENTHESIZED LATIN SMALL LETTER T]
output[outputPos++] = '(';
output[outputPos++] = 't';
output[outputPos++] = ')';
break;
case '\u02A8': // ʨ [LATIN SMALL LETTER TC DIGRAPH WITH CURL]
output[outputPos++] = 't';
output[outputPos++] = 'c';
break;
case '\u00FE': // þ [LATIN SMALL LETTER THORN]
case '\u1D7A': // ᵺ [LATIN SMALL LETTER TH WITH STRIKETHROUGH]
case '\uA767': // ꝧ [LATIN SMALL LETTER THORN WITH STROKE THROUGH DESCENDER]
output[outputPos++] = 't';
output[outputPos++] = 'h';
break;
case '\u02A6': // ʦ [LATIN SMALL LETTER TS DIGRAPH]
output[outputPos++] = 't';
output[outputPos++] = 's';
break;
case '\uA729': // ꜩ [LATIN SMALL LETTER TZ]
output[outputPos++] = 't';
output[outputPos++] = 'z';
break;
case '\u00D9': // Ù [LATIN CAPITAL LETTER U WITH GRAVE]
case '\u00DA': // Ú [LATIN CAPITAL LETTER U WITH ACUTE]
case '\u00DB': // Û [LATIN CAPITAL LETTER U WITH CIRCUMFLEX]
case '\u00DC': // Ü [LATIN CAPITAL LETTER U WITH DIAERESIS]
case '\u0168': // Ũ [LATIN CAPITAL LETTER U WITH TILDE]
case '\u016A': // Ū [LATIN CAPITAL LETTER U WITH MACRON]
case '\u016C': // Ŭ [LATIN CAPITAL LETTER U WITH BREVE]
case '\u016E': // Ů [LATIN CAPITAL LETTER U WITH RING ABOVE]
case '\u0170': // Ű [LATIN CAPITAL LETTER U WITH DOUBLE ACUTE]
case '\u0172': // Ų [LATIN CAPITAL LETTER U WITH OGONEK]
case '\u01AF': // Ư [LATIN CAPITAL LETTER U WITH HORN]
case '\u01D3': // Ǔ [LATIN CAPITAL LETTER U WITH CARON]
case '\u01D5': // Ǖ [LATIN CAPITAL LETTER U WITH DIAERESIS AND MACRON]
case '\u01D7': // Ǘ [LATIN CAPITAL LETTER U WITH DIAERESIS AND ACUTE]
case '\u01D9': // Ǚ [LATIN CAPITAL LETTER U WITH DIAERESIS AND CARON]
case '\u01DB': // Ǜ [LATIN CAPITAL LETTER U WITH DIAERESIS AND GRAVE]
case '\u0214': // Ȕ [LATIN CAPITAL LETTER U WITH DOUBLE GRAVE]
case '\u0216': // Ȗ [LATIN CAPITAL LETTER U WITH INVERTED BREVE]
case '\u0244': // Ʉ [LATIN CAPITAL LETTER U BAR]
case '\u1D1C': // ᴜ [LATIN LETTER SMALL CAPITAL U]
case '\u1D7E': // ᵾ [LATIN SMALL CAPITAL LETTER U WITH STROKE]
case '\u1E72': // Ṳ [LATIN CAPITAL LETTER U WITH DIAERESIS BELOW]
case '\u1E74': // Ṵ [LATIN CAPITAL LETTER U WITH TILDE BELOW]
case '\u1E76': // Ṷ [LATIN CAPITAL LETTER U WITH CIRCUMFLEX BELOW]
case '\u1E78': // Ṹ [LATIN CAPITAL LETTER U WITH TILDE AND ACUTE]
case '\u1E7A': // Ṻ [LATIN CAPITAL LETTER U WITH MACRON AND DIAERESIS]
case '\u1EE4': // Ụ [LATIN CAPITAL LETTER U WITH DOT BELOW]
case '\u1EE6': // Ủ [LATIN CAPITAL LETTER U WITH HOOK ABOVE]
case '\u1EE8': // Ứ [LATIN CAPITAL LETTER U WITH HORN AND ACUTE]
case '\u1EEA': // Ừ [LATIN CAPITAL LETTER U WITH HORN AND GRAVE]
case '\u1EEC': // Ử [LATIN CAPITAL LETTER U WITH HORN AND HOOK ABOVE]
case '\u1EEE': // Ữ [LATIN CAPITAL LETTER U WITH HORN AND TILDE]
case '\u1EF0': // Ự [LATIN CAPITAL LETTER U WITH HORN AND DOT BELOW]
case '\u24CA': // Ⓤ [CIRCLED LATIN CAPITAL LETTER U]
case '\uFF35': // U [FULLWIDTH LATIN CAPITAL LETTER U]
output[outputPos++] = 'U';
break;
case '\u00F9': // ù [LATIN SMALL LETTER U WITH GRAVE]
case '\u00FA': // ú [LATIN SMALL LETTER U WITH ACUTE]
case '\u00FB': // û [LATIN SMALL LETTER U WITH CIRCUMFLEX]
case '\u00FC': // ü [LATIN SMALL LETTER U WITH DIAERESIS]
case '\u0169': // ũ [LATIN SMALL LETTER U WITH TILDE]
case '\u016B': // ū [LATIN SMALL LETTER U WITH MACRON]
case '\u016D': // ŭ [LATIN SMALL LETTER U WITH BREVE]
case '\u016F': // ů [LATIN SMALL LETTER U WITH RING ABOVE]
case '\u0171': // ű [LATIN SMALL LETTER U WITH DOUBLE ACUTE]
case '\u0173': // ų [LATIN SMALL LETTER U WITH OGONEK]
case '\u01B0': // ư [LATIN SMALL LETTER U WITH HORN]
case '\u01D4': // ǔ [LATIN SMALL LETTER U WITH CARON]
case '\u01D6': // ǖ [LATIN SMALL LETTER U WITH DIAERESIS AND MACRON]
case '\u01D8': // ǘ [LATIN SMALL LETTER U WITH DIAERESIS AND ACUTE]
case '\u01DA': // ǚ [LATIN SMALL LETTER U WITH DIAERESIS AND CARON]
case '\u01DC': // ǜ [LATIN SMALL LETTER U WITH DIAERESIS AND GRAVE]
case '\u0215': // ȕ [LATIN SMALL LETTER U WITH DOUBLE GRAVE]
case '\u0217': // ȗ [LATIN SMALL LETTER U WITH INVERTED BREVE]
case '\u0289': // ʉ [LATIN SMALL LETTER U BAR]
case '\u1D64': // ᵤ [LATIN SUBSCRIPT SMALL LETTER U]
case '\u1D99': // ᶙ [LATIN SMALL LETTER U WITH RETROFLEX HOOK]
case '\u1E73': // ṳ [LATIN SMALL LETTER U WITH DIAERESIS BELOW]
case '\u1E75': // ṵ [LATIN SMALL LETTER U WITH TILDE BELOW]
case '\u1E77': // ṷ [LATIN SMALL LETTER U WITH CIRCUMFLEX BELOW]
case '\u1E79': // ṹ [LATIN SMALL LETTER U WITH TILDE AND ACUTE]
case '\u1E7B': // ṻ [LATIN SMALL LETTER U WITH MACRON AND DIAERESIS]
case '\u1EE5': // ụ [LATIN SMALL LETTER U WITH DOT BELOW]
case '\u1EE7': // ủ [LATIN SMALL LETTER U WITH HOOK ABOVE]
case '\u1EE9': // ứ [LATIN SMALL LETTER U WITH HORN AND ACUTE]
case '\u1EEB': // ừ [LATIN SMALL LETTER U WITH HORN AND GRAVE]
case '\u1EED': // ử [LATIN SMALL LETTER U WITH HORN AND HOOK ABOVE]
case '\u1EEF': // ữ [LATIN SMALL LETTER U WITH HORN AND TILDE]
case '\u1EF1': // ự [LATIN SMALL LETTER U WITH HORN AND DOT BELOW]
case '\u24E4': // ⓤ [CIRCLED LATIN SMALL LETTER U]
case '\uFF55': // u [FULLWIDTH LATIN SMALL LETTER U]
output[outputPos++] = 'u';
break;
case '\u24B0': // ⒰ [PARENTHESIZED LATIN SMALL LETTER U]
output[outputPos++] = '(';
output[outputPos++] = 'u';
output[outputPos++] = ')';
break;
case '\u1D6B': // ᵫ [LATIN SMALL LETTER UE]
output[outputPos++] = 'u';
output[outputPos++] = 'e';
break;
case '\u01B2': // Ʋ [LATIN CAPITAL LETTER V WITH HOOK]
case '\u0245': // Ʌ [LATIN CAPITAL LETTER TURNED V]
case '\u1D20': // ᴠ [LATIN LETTER SMALL CAPITAL V]
case '\u1E7C': // Ṽ [LATIN CAPITAL LETTER V WITH TILDE]
case '\u1E7E': // Ṿ [LATIN CAPITAL LETTER V WITH DOT BELOW]
case '\u1EFC': // Ỽ [LATIN CAPITAL LETTER MIDDLE-WELSH V]
case '\u24CB': // Ⓥ [CIRCLED LATIN CAPITAL LETTER V]
case '\uA75E': // Ꝟ [LATIN CAPITAL LETTER V WITH DIAGONAL STROKE]
case '\uA768': // Ꝩ [LATIN CAPITAL LETTER VEND]
case '\uFF36': // V [FULLWIDTH LATIN CAPITAL LETTER V]
output[outputPos++] = 'V';
break;
case '\u028B': // ʋ [LATIN SMALL LETTER V WITH HOOK]
case '\u028C': // ʌ [LATIN SMALL LETTER TURNED V]
case '\u1D65': // ᵥ [LATIN SUBSCRIPT SMALL LETTER V]
case '\u1D8C': // ᶌ [LATIN SMALL LETTER V WITH PALATAL HOOK]
case '\u1E7D': // ṽ [LATIN SMALL LETTER V WITH TILDE]
case '\u1E7F': // ṿ [LATIN SMALL LETTER V WITH DOT BELOW]
case '\u24E5': // ⓥ [CIRCLED LATIN SMALL LETTER V]
case '\u2C71': // ⱱ [LATIN SMALL LETTER V WITH RIGHT HOOK]
case '\u2C74': // ⱴ [LATIN SMALL LETTER V WITH CURL]
case '\uA75F': // ꝟ [LATIN SMALL LETTER V WITH DIAGONAL STROKE]
case '\uFF56': // v [FULLWIDTH LATIN SMALL LETTER V]
output[outputPos++] = 'v';
break;
case '\uA760': // Ꝡ [LATIN CAPITAL LETTER VY]
output[outputPos++] = 'V';
output[outputPos++] = 'Y';
break;
case '\u24B1': // ⒱ [PARENTHESIZED LATIN SMALL LETTER V]
output[outputPos++] = '(';
output[outputPos++] = 'v';
output[outputPos++] = ')';
break;
case '\uA761': // ꝡ [LATIN SMALL LETTER VY]
output[outputPos++] = 'v';
output[outputPos++] = 'y';
break;
case '\u0174': // Ŵ [LATIN CAPITAL LETTER W WITH CIRCUMFLEX]
case '\u01F7': // Ƿ http://en.wikipedia.org/wiki/Wynn [LATIN CAPITAL LETTER WYNN]
case '\u1D21': // ᴡ [LATIN LETTER SMALL CAPITAL W]
case '\u1E80': // Ẁ [LATIN CAPITAL LETTER W WITH GRAVE]
case '\u1E82': // Ẃ [LATIN CAPITAL LETTER W WITH ACUTE]
case '\u1E84': // Ẅ [LATIN CAPITAL LETTER W WITH DIAERESIS]
case '\u1E86': // Ẇ [LATIN CAPITAL LETTER W WITH DOT ABOVE]
case '\u1E88': // Ẉ [LATIN CAPITAL LETTER W WITH DOT BELOW]
case '\u24CC': // Ⓦ [CIRCLED LATIN CAPITAL LETTER W]
case '\u2C72': // Ⱳ [LATIN CAPITAL LETTER W WITH HOOK]
case '\uFF37': // W [FULLWIDTH LATIN CAPITAL LETTER W]
output[outputPos++] = 'W';
break;
case '\u0175': // ŵ [LATIN SMALL LETTER W WITH CIRCUMFLEX]
case '\u01BF': // ƿ http://en.wikipedia.org/wiki/Wynn [LATIN LETTER WYNN]
case '\u028D': // ʍ [LATIN SMALL LETTER TURNED W]
case '\u1E81': // ẁ [LATIN SMALL LETTER W WITH GRAVE]
case '\u1E83': // ẃ [LATIN SMALL LETTER W WITH ACUTE]
case '\u1E85': // ẅ [LATIN SMALL LETTER W WITH DIAERESIS]
case '\u1E87': // ẇ [LATIN SMALL LETTER W WITH DOT ABOVE]
case '\u1E89': // ẉ [LATIN SMALL LETTER W WITH DOT BELOW]
case '\u1E98': // ẘ [LATIN SMALL LETTER W WITH RING ABOVE]
case '\u24E6': // ⓦ [CIRCLED LATIN SMALL LETTER W]
case '\u2C73': // ⱳ [LATIN SMALL LETTER W WITH HOOK]
case '\uFF57': // w [FULLWIDTH LATIN SMALL LETTER W]
output[outputPos++] = 'w';
break;
case '\u24B2': // ⒲ [PARENTHESIZED LATIN SMALL LETTER W]
output[outputPos++] = '(';
output[outputPos++] = 'w';
output[outputPos++] = ')';
break;
case '\u1E8A': // Ẋ [LATIN CAPITAL LETTER X WITH DOT ABOVE]
case '\u1E8C': // Ẍ [LATIN CAPITAL LETTER X WITH DIAERESIS]
case '\u24CD': // Ⓧ [CIRCLED LATIN CAPITAL LETTER X]
case '\uFF38': // X [FULLWIDTH LATIN CAPITAL LETTER X]
output[outputPos++] = 'X';
break;
case '\u1D8D': // ᶍ [LATIN SMALL LETTER X WITH PALATAL HOOK]
case '\u1E8B': // ẋ [LATIN SMALL LETTER X WITH DOT ABOVE]
case '\u1E8D': // ẍ [LATIN SMALL LETTER X WITH DIAERESIS]
case '\u2093': // ₓ [LATIN SUBSCRIPT SMALL LETTER X]
case '\u24E7': // ⓧ [CIRCLED LATIN SMALL LETTER X]
case '\uFF58': // x [FULLWIDTH LATIN SMALL LETTER X]
output[outputPos++] = 'x';
break;
case '\u24B3': // ⒳ [PARENTHESIZED LATIN SMALL LETTER X]
output[outputPos++] = '(';
output[outputPos++] = 'x';
output[outputPos++] = ')';
break;
case '\u00DD': // Ý [LATIN CAPITAL LETTER Y WITH ACUTE]
case '\u0176': // Ŷ [LATIN CAPITAL LETTER Y WITH CIRCUMFLEX]
case '\u0178': // Ÿ [LATIN CAPITAL LETTER Y WITH DIAERESIS]
case '\u01B3': // Ƴ [LATIN CAPITAL LETTER Y WITH HOOK]
case '\u0232': // Ȳ [LATIN CAPITAL LETTER Y WITH MACRON]
case '\u024E': // Ɏ [LATIN CAPITAL LETTER Y WITH STROKE]
case '\u028F': // ʏ [LATIN LETTER SMALL CAPITAL Y]
case '\u1E8E': // Ẏ [LATIN CAPITAL LETTER Y WITH DOT ABOVE]
case '\u1EF2': // Ỳ [LATIN CAPITAL LETTER Y WITH GRAVE]
case '\u1EF4': // Ỵ [LATIN CAPITAL LETTER Y WITH DOT BELOW]
case '\u1EF6': // Ỷ [LATIN CAPITAL LETTER Y WITH HOOK ABOVE]
case '\u1EF8': // Ỹ [LATIN CAPITAL LETTER Y WITH TILDE]
case '\u1EFE': // Ỿ [LATIN CAPITAL LETTER Y WITH LOOP]
case '\u24CE': // Ⓨ [CIRCLED LATIN CAPITAL LETTER Y]
case '\uFF39': // Y [FULLWIDTH LATIN CAPITAL LETTER Y]
output[outputPos++] = 'Y';
break;
case '\u00FD': // ý [LATIN SMALL LETTER Y WITH ACUTE]
case '\u00FF': // ÿ [LATIN SMALL LETTER Y WITH DIAERESIS]
case '\u0177': // ŷ [LATIN SMALL LETTER Y WITH CIRCUMFLEX]
case '\u01B4': // ƴ [LATIN SMALL LETTER Y WITH HOOK]
case '\u0233': // ȳ [LATIN SMALL LETTER Y WITH MACRON]
case '\u024F': // ɏ [LATIN SMALL LETTER Y WITH STROKE]
case '\u028E': // ʎ [LATIN SMALL LETTER TURNED Y]
case '\u1E8F': // ẏ [LATIN SMALL LETTER Y WITH DOT ABOVE]
case '\u1E99': // ẙ [LATIN SMALL LETTER Y WITH RING ABOVE]
case '\u1EF3': // ỳ [LATIN SMALL LETTER Y WITH GRAVE]
case '\u1EF5': // ỵ [LATIN SMALL LETTER Y WITH DOT BELOW]
case '\u1EF7': // ỷ [LATIN SMALL LETTER Y WITH HOOK ABOVE]
case '\u1EF9': // ỹ [LATIN SMALL LETTER Y WITH TILDE]
case '\u1EFF': // ỿ [LATIN SMALL LETTER Y WITH LOOP]
case '\u24E8': // ⓨ [CIRCLED LATIN SMALL LETTER Y]
case '\uFF59': // y [FULLWIDTH LATIN SMALL LETTER Y]
output[outputPos++] = 'y';
break;
case '\u24B4': // ⒴ [PARENTHESIZED LATIN SMALL LETTER Y]
output[outputPos++] = '(';
output[outputPos++] = 'y';
output[outputPos++] = ')';
break;
case '\u0179': // Ź [LATIN CAPITAL LETTER Z WITH ACUTE]
case '\u017B': // Ż [LATIN CAPITAL LETTER Z WITH DOT ABOVE]
case '\u017D': // Ž [LATIN CAPITAL LETTER Z WITH CARON]
case '\u01B5': // Ƶ [LATIN CAPITAL LETTER Z WITH STROKE]
case '\u021C': // Ȝ http://en.wikipedia.org/wiki/Yogh [LATIN CAPITAL LETTER YOGH]
case '\u0224': // Ȥ [LATIN CAPITAL LETTER Z WITH HOOK]
case '\u1D22': // ᴢ [LATIN LETTER SMALL CAPITAL Z]
case '\u1E90': // Ẑ [LATIN CAPITAL LETTER Z WITH CIRCUMFLEX]
case '\u1E92': // Ẓ [LATIN CAPITAL LETTER Z WITH DOT BELOW]
case '\u1E94': // Ẕ [LATIN CAPITAL LETTER Z WITH LINE BELOW]
case '\u24CF': // Ⓩ [CIRCLED LATIN CAPITAL LETTER Z]
case '\u2C6B': // Ⱬ [LATIN CAPITAL LETTER Z WITH DESCENDER]
case '\uA762': // Ꝣ [LATIN CAPITAL LETTER VISIGOTHIC Z]
case '\uFF3A': // Z [FULLWIDTH LATIN CAPITAL LETTER Z]
output[outputPos++] = 'Z';
break;
case '\u017A': // ź [LATIN SMALL LETTER Z WITH ACUTE]
case '\u017C': // ż [LATIN SMALL LETTER Z WITH DOT ABOVE]
case '\u017E': // ž [LATIN SMALL LETTER Z WITH CARON]
case '\u01B6': // ƶ [LATIN SMALL LETTER Z WITH STROKE]
case '\u021D': // ȝ http://en.wikipedia.org/wiki/Yogh [LATIN SMALL LETTER YOGH]
case '\u0225': // ȥ [LATIN SMALL LETTER Z WITH HOOK]
case '\u0240': // ɀ [LATIN SMALL LETTER Z WITH SWASH TAIL]
case '\u0290': // ʐ [LATIN SMALL LETTER Z WITH RETROFLEX HOOK]
case '\u0291': // ʑ [LATIN SMALL LETTER Z WITH CURL]
case '\u1D76': // ᵶ [LATIN SMALL LETTER Z WITH MIDDLE TILDE]
case '\u1D8E': // ᶎ [LATIN SMALL LETTER Z WITH PALATAL HOOK]
case '\u1E91': // ẑ [LATIN SMALL LETTER Z WITH CIRCUMFLEX]
case '\u1E93': // ẓ [LATIN SMALL LETTER Z WITH DOT BELOW]
case '\u1E95': // ẕ [LATIN SMALL LETTER Z WITH LINE BELOW]
case '\u24E9': // ⓩ [CIRCLED LATIN SMALL LETTER Z]
case '\u2C6C': // ⱬ [LATIN SMALL LETTER Z WITH DESCENDER]
case '\uA763': // ꝣ [LATIN SMALL LETTER VISIGOTHIC Z]
case '\uFF5A': // z [FULLWIDTH LATIN SMALL LETTER Z]
output[outputPos++] = 'z';
break;
case '\u24B5': // ⒵ [PARENTHESIZED LATIN SMALL LETTER Z]
output[outputPos++] = '(';
output[outputPos++] = 'z';
output[outputPos++] = ')';
break;
case '\u2070': // ⁰ [SUPERSCRIPT ZERO]
case '\u2080': // ₀ [SUBSCRIPT ZERO]
case '\u24EA': // ⓪ [CIRCLED DIGIT ZERO]
case '\u24FF': // ⓿ [NEGATIVE CIRCLED DIGIT ZERO]
case '\uFF10': // 0 [FULLWIDTH DIGIT ZERO]
output[outputPos++] = '0';
break;
case '\u00B9': // ¹ [SUPERSCRIPT ONE]
case '\u2081': // ₁ [SUBSCRIPT ONE]
case '\u2460': // ① [CIRCLED DIGIT ONE]
case '\u24F5': // ⓵ [DOUBLE CIRCLED DIGIT ONE]
case '\u2776': // ❶ [DINGBAT NEGATIVE CIRCLED DIGIT ONE]
case '\u2780': // ➀ [DINGBAT CIRCLED SANS-SERIF DIGIT ONE]
case '\u278A': // ➊ [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT ONE]
case '\uFF11': // 1 [FULLWIDTH DIGIT ONE]
output[outputPos++] = '1';
break;
case '\u2488': // ⒈ [DIGIT ONE FULL STOP]
output[outputPos++] = '1';
output[outputPos++] = '.';
break;
case '\u2474': // ⑴ [PARENTHESIZED DIGIT ONE]
output[outputPos++] = '(';
output[outputPos++] = '1';
output[outputPos++] = ')';
break;
case '\u00B2': // ² [SUPERSCRIPT TWO]
case '\u2082': // ₂ [SUBSCRIPT TWO]
case '\u2461': // ② [CIRCLED DIGIT TWO]
case '\u24F6': // ⓶ [DOUBLE CIRCLED DIGIT TWO]
case '\u2777': // ❷ [DINGBAT NEGATIVE CIRCLED DIGIT TWO]
case '\u2781': // ➁ [DINGBAT CIRCLED SANS-SERIF DIGIT TWO]
case '\u278B': // ➋ [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT TWO]
case '\uFF12': // 2 [FULLWIDTH DIGIT TWO]
output[outputPos++] = '2';
break;
case '\u2489': // ⒉ [DIGIT TWO FULL STOP]
output[outputPos++] = '2';
output[outputPos++] = '.';
break;
case '\u2475': // ⑵ [PARENTHESIZED DIGIT TWO]
output[outputPos++] = '(';
output[outputPos++] = '2';
output[outputPos++] = ')';
break;
case '\u00B3': // ³ [SUPERSCRIPT THREE]
case '\u2083': // ₃ [SUBSCRIPT THREE]
case '\u2462': // ③ [CIRCLED DIGIT THREE]
case '\u24F7': // ⓷ [DOUBLE CIRCLED DIGIT THREE]
case '\u2778': // ❸ [DINGBAT NEGATIVE CIRCLED DIGIT THREE]
case '\u2782': // ➂ [DINGBAT CIRCLED SANS-SERIF DIGIT THREE]
case '\u278C': // ➌ [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT THREE]
case '\uFF13': // 3 [FULLWIDTH DIGIT THREE]
output[outputPos++] = '3';
break;
case '\u248A': // ⒊ [DIGIT THREE FULL STOP]
output[outputPos++] = '3';
output[outputPos++] = '.';
break;
case '\u2476': // ⑶ [PARENTHESIZED DIGIT THREE]
output[outputPos++] = '(';
output[outputPos++] = '3';
output[outputPos++] = ')';
break;
case '\u2074': // ⁴ [SUPERSCRIPT FOUR]
case '\u2084': // ₄ [SUBSCRIPT FOUR]
case '\u2463': // ④ [CIRCLED DIGIT FOUR]
case '\u24F8': // ⓸ [DOUBLE CIRCLED DIGIT FOUR]
case '\u2779': // ❹ [DINGBAT NEGATIVE CIRCLED DIGIT FOUR]
case '\u2783': // ➃ [DINGBAT CIRCLED SANS-SERIF DIGIT FOUR]
case '\u278D': // ➍ [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT FOUR]
case '\uFF14': // 4 [FULLWIDTH DIGIT FOUR]
output[outputPos++] = '4';
break;
case '\u248B': // ⒋ [DIGIT FOUR FULL STOP]
output[outputPos++] = '4';
output[outputPos++] = '.';
break;
case '\u2477': // ⑷ [PARENTHESIZED DIGIT FOUR]
output[outputPos++] = '(';
output[outputPos++] = '4';
output[outputPos++] = ')';
break;
case '\u2075': // ⁵ [SUPERSCRIPT FIVE]
case '\u2085': // ₅ [SUBSCRIPT FIVE]
case '\u2464': // ⑤ [CIRCLED DIGIT FIVE]
case '\u24F9': // ⓹ [DOUBLE CIRCLED DIGIT FIVE]
case '\u277A': // ❺ [DINGBAT NEGATIVE CIRCLED DIGIT FIVE]
case '\u2784': // ➄ [DINGBAT CIRCLED SANS-SERIF DIGIT FIVE]
case '\u278E': // ➎ [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT FIVE]
case '\uFF15': // 5 [FULLWIDTH DIGIT FIVE]
output[outputPos++] = '5';
break;
case '\u248C': // ⒌ [DIGIT FIVE FULL STOP]
output[outputPos++] = '5';
output[outputPos++] = '.';
break;
case '\u2478': // ⑸ [PARENTHESIZED DIGIT FIVE]
output[outputPos++] = '(';
output[outputPos++] = '5';
output[outputPos++] = ')';
break;
case '\u2076': // ⁶ [SUPERSCRIPT SIX]
case '\u2086': // ₆ [SUBSCRIPT SIX]
case '\u2465': // ⑥ [CIRCLED DIGIT SIX]
case '\u24FA': // ⓺ [DOUBLE CIRCLED DIGIT SIX]
case '\u277B': // ❻ [DINGBAT NEGATIVE CIRCLED DIGIT SIX]
case '\u2785': // ➅ [DINGBAT CIRCLED SANS-SERIF DIGIT SIX]
case '\u278F': // ➏ [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT SIX]
case '\uFF16': // 6 [FULLWIDTH DIGIT SIX]
output[outputPos++] = '6';
break;
case '\u248D': // ⒍ [DIGIT SIX FULL STOP]
output[outputPos++] = '6';
output[outputPos++] = '.';
break;
case '\u2479': // ⑹ [PARENTHESIZED DIGIT SIX]
output[outputPos++] = '(';
output[outputPos++] = '6';
output[outputPos++] = ')';
break;
case '\u2077': // ⁷ [SUPERSCRIPT SEVEN]
case '\u2087': // ₇ [SUBSCRIPT SEVEN]
case '\u2466': // ⑦ [CIRCLED DIGIT SEVEN]
case '\u24FB': // ⓻ [DOUBLE CIRCLED DIGIT SEVEN]
case '\u277C': // ❼ [DINGBAT NEGATIVE CIRCLED DIGIT SEVEN]
case '\u2786': // ➆ [DINGBAT CIRCLED SANS-SERIF DIGIT SEVEN]
case '\u2790': // ➐ [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT SEVEN]
case '\uFF17': // 7 [FULLWIDTH DIGIT SEVEN]
output[outputPos++] = '7';
break;
case '\u248E': // ⒎ [DIGIT SEVEN FULL STOP]
output[outputPos++] = '7';
output[outputPos++] = '.';
break;
case '\u247A': // ⑺ [PARENTHESIZED DIGIT SEVEN]
output[outputPos++] = '(';
output[outputPos++] = '7';
output[outputPos++] = ')';
break;
case '\u2078': // ⁸ [SUPERSCRIPT EIGHT]
case '\u2088': // ₈ [SUBSCRIPT EIGHT]
case '\u2467': // ⑧ [CIRCLED DIGIT EIGHT]
case '\u24FC': // ⓼ [DOUBLE CIRCLED DIGIT EIGHT]
case '\u277D': // ❽ [DINGBAT NEGATIVE CIRCLED DIGIT EIGHT]
case '\u2787': // ➇ [DINGBAT CIRCLED SANS-SERIF DIGIT EIGHT]
case '\u2791': // ➑ [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT EIGHT]
case '\uFF18': // 8 [FULLWIDTH DIGIT EIGHT]
output[outputPos++] = '8';
break;
case '\u248F': // ⒏ [DIGIT EIGHT FULL STOP]
output[outputPos++] = '8';
output[outputPos++] = '.';
break;
case '\u247B': // ⑻ [PARENTHESIZED DIGIT EIGHT]
output[outputPos++] = '(';
output[outputPos++] = '8';
output[outputPos++] = ')';
break;
case '\u2079': // ⁹ [SUPERSCRIPT NINE]
case '\u2089': // ₉ [SUBSCRIPT NINE]
case '\u2468': // ⑨ [CIRCLED DIGIT NINE]
case '\u24FD': // ⓽ [DOUBLE CIRCLED DIGIT NINE]
case '\u277E': // ❾ [DINGBAT NEGATIVE CIRCLED DIGIT NINE]
case '\u2788': // ➈ [DINGBAT CIRCLED SANS-SERIF DIGIT NINE]
case '\u2792': // ➒ [DINGBAT NEGATIVE CIRCLED SANS-SERIF DIGIT NINE]
case '\uFF19': // 9 [FULLWIDTH DIGIT NINE]
output[outputPos++] = '9';
break;
case '\u2490': // ⒐ [DIGIT NINE FULL STOP]
output[outputPos++] = '9';
output[outputPos++] = '.';
break;
case '\u247C': // ⑼ [PARENTHESIZED DIGIT NINE]
output[outputPos++] = '(';
output[outputPos++] = '9';
output[outputPos++] = ')';
break;
case '\u2469': // ⑩ [CIRCLED NUMBER TEN]
case '\u24FE': // ⓾ [DOUBLE CIRCLED NUMBER TEN]
case '\u277F': // ❿ [DINGBAT NEGATIVE CIRCLED NUMBER TEN]
case '\u2789': // ➉ [DINGBAT CIRCLED SANS-SERIF NUMBER TEN]
case '\u2793': // ➓ [DINGBAT NEGATIVE CIRCLED SANS-SERIF NUMBER TEN]
output[outputPos++] = '1';
output[outputPos++] = '0';
break;
case '\u2491': // ⒑ [NUMBER TEN FULL STOP]
output[outputPos++] = '1';
output[outputPos++] = '0';
output[outputPos++] = '.';
break;
case '\u247D': // ⑽ [PARENTHESIZED NUMBER TEN]
output[outputPos++] = '(';
output[outputPos++] = '1';
output[outputPos++] = '0';
output[outputPos++] = ')';
break;
case '\u246A': // ⑪ [CIRCLED NUMBER ELEVEN]
case '\u24EB': // ⓫ [NEGATIVE CIRCLED NUMBER ELEVEN]
output[outputPos++] = '1';
output[outputPos++] = '1';
break;
case '\u2492': // ⒒ [NUMBER ELEVEN FULL STOP]
output[outputPos++] = '1';
output[outputPos++] = '1';
output[outputPos++] = '.';
break;
case '\u247E': // ⑾ [PARENTHESIZED NUMBER ELEVEN]
output[outputPos++] = '(';
output[outputPos++] = '1';
output[outputPos++] = '1';
output[outputPos++] = ')';
break;
case '\u246B': // ⑫ [CIRCLED NUMBER TWELVE]
case '\u24EC': // ⓬ [NEGATIVE CIRCLED NUMBER TWELVE]
output[outputPos++] = '1';
output[outputPos++] = '2';
break;
case '\u2493': // ⒓ [NUMBER TWELVE FULL STOP]
output[outputPos++] = '1';
output[outputPos++] = '2';
output[outputPos++] = '.';
break;
case '\u247F': // ⑿ [PARENTHESIZED NUMBER TWELVE]
output[outputPos++] = '(';
output[outputPos++] = '1';
output[outputPos++] = '2';
output[outputPos++] = ')';
break;
case '\u246C': // ⑬ [CIRCLED NUMBER THIRTEEN]
case '\u24ED': // ⓭ [NEGATIVE CIRCLED NUMBER THIRTEEN]
output[outputPos++] = '1';
output[outputPos++] = '3';
break;
case '\u2494': // ⒔ [NUMBER THIRTEEN FULL STOP]
output[outputPos++] = '1';
output[outputPos++] = '3';
output[outputPos++] = '.';
break;
case '\u2480': // ⒀ [PARENTHESIZED NUMBER THIRTEEN]
output[outputPos++] = '(';
output[outputPos++] = '1';
output[outputPos++] = '3';
output[outputPos++] = ')';
break;
case '\u246D': // ⑭ [CIRCLED NUMBER FOURTEEN]
case '\u24EE': // ⓮ [NEGATIVE CIRCLED NUMBER FOURTEEN]
output[outputPos++] = '1';
output[outputPos++] = '4';
break;
case '\u2495': // ⒕ [NUMBER FOURTEEN FULL STOP]
output[outputPos++] = '1';
output[outputPos++] = '4';
output[outputPos++] = '.';
break;
case '\u2481': // ⒁ [PARENTHESIZED NUMBER FOURTEEN]
output[outputPos++] = '(';
output[outputPos++] = '1';
output[outputPos++] = '4';
output[outputPos++] = ')';
break;
case '\u246E': // ⑮ [CIRCLED NUMBER FIFTEEN]
case '\u24EF': // ⓯ [NEGATIVE CIRCLED NUMBER FIFTEEN]
output[outputPos++] = '1';
output[outputPos++] = '5';
break;
case '\u2496': // ⒖ [NUMBER FIFTEEN FULL STOP]
output[outputPos++] = '1';
output[outputPos++] = '5';
output[outputPos++] = '.';
break;
case '\u2482': // ⒂ [PARENTHESIZED NUMBER FIFTEEN]
output[outputPos++] = '(';
output[outputPos++] = '1';
output[outputPos++] = '5';
output[outputPos++] = ')';
break;
case '\u246F': // ⑯ [CIRCLED NUMBER SIXTEEN]
case '\u24F0': // ⓰ [NEGATIVE CIRCLED NUMBER SIXTEEN]
output[outputPos++] = '1';
output[outputPos++] = '6';
break;
case '\u2497': // ⒗ [NUMBER SIXTEEN FULL STOP]
output[outputPos++] = '1';
output[outputPos++] = '6';
output[outputPos++] = '.';
break;
case '\u2483': // ⒃ [PARENTHESIZED NUMBER SIXTEEN]
output[outputPos++] = '(';
output[outputPos++] = '1';
output[outputPos++] = '6';
output[outputPos++] = ')';
break;
case '\u2470': // ⑰ [CIRCLED NUMBER SEVENTEEN]
case '\u24F1': // ⓱ [NEGATIVE CIRCLED NUMBER SEVENTEEN]
output[outputPos++] = '1';
output[outputPos++] = '7';
break;
case '\u2498': // ⒘ [NUMBER SEVENTEEN FULL STOP]
output[outputPos++] = '1';
output[outputPos++] = '7';
output[outputPos++] = '.';
break;
case '\u2484': // ⒄ [PARENTHESIZED NUMBER SEVENTEEN]
output[outputPos++] = '(';
output[outputPos++] = '1';
output[outputPos++] = '7';
output[outputPos++] = ')';
break;
case '\u2471': // ⑱ [CIRCLED NUMBER EIGHTEEN]
case '\u24F2': // ⓲ [NEGATIVE CIRCLED NUMBER EIGHTEEN]
output[outputPos++] = '1';
output[outputPos++] = '8';
break;
case '\u2499': // ⒙ [NUMBER EIGHTEEN FULL STOP]
output[outputPos++] = '1';
output[outputPos++] = '8';
output[outputPos++] = '.';
break;
case '\u2485': // ⒅ [PARENTHESIZED NUMBER EIGHTEEN]
output[outputPos++] = '(';
output[outputPos++] = '1';
output[outputPos++] = '8';
output[outputPos++] = ')';
break;
case '\u2472': // ⑲ [CIRCLED NUMBER NINETEEN]
case '\u24F3': // ⓳ [NEGATIVE CIRCLED NUMBER NINETEEN]
output[outputPos++] = '1';
output[outputPos++] = '9';
break;
case '\u249A': // ⒚ [NUMBER NINETEEN FULL STOP]
output[outputPos++] = '1';
output[outputPos++] = '9';
output[outputPos++] = '.';
break;
case '\u2486': // ⒆ [PARENTHESIZED NUMBER NINETEEN]
output[outputPos++] = '(';
output[outputPos++] = '1';
output[outputPos++] = '9';
output[outputPos++] = ')';
break;
case '\u2473': // ⑳ [CIRCLED NUMBER TWENTY]
case '\u24F4': // ⓴ [NEGATIVE CIRCLED NUMBER TWENTY]
output[outputPos++] = '2';
output[outputPos++] = '0';
break;
case '\u249B': // ⒛ [NUMBER TWENTY FULL STOP]
output[outputPos++] = '2';
output[outputPos++] = '0';
output[outputPos++] = '.';
break;
case '\u2487': // ⒇ [PARENTHESIZED NUMBER TWENTY]
output[outputPos++] = '(';
output[outputPos++] = '2';
output[outputPos++] = '0';
output[outputPos++] = ')';
break;
case '\u00AB': // « [LEFT-POINTING DOUBLE ANGLE QUOTATION MARK]
case '\u00BB': // » [RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK]
case '\u201C': // “ [LEFT DOUBLE QUOTATION MARK]
case '\u201D': // ” [RIGHT DOUBLE QUOTATION MARK]
case '\u201E': // „ [DOUBLE LOW-9 QUOTATION MARK]
case '\u2033': // ″ [DOUBLE PRIME]
case '\u2036': // ‶ [REVERSED DOUBLE PRIME]
case '\u275D': // ❝ [HEAVY DOUBLE TURNED COMMA QUOTATION MARK ORNAMENT]
case '\u275E': // ❞ [HEAVY DOUBLE COMMA QUOTATION MARK ORNAMENT]
case '\u276E': // ❮ [HEAVY LEFT-POINTING ANGLE QUOTATION MARK ORNAMENT]
case '\u276F': // ❯ [HEAVY RIGHT-POINTING ANGLE QUOTATION MARK ORNAMENT]
case '\uFF02': // " [FULLWIDTH QUOTATION MARK]
output[outputPos++] = '"';
break;
case '\u2018': // ‘ [LEFT SINGLE QUOTATION MARK]
case '\u2019': // ’ [RIGHT SINGLE QUOTATION MARK]
case '\u201A': // ‚ [SINGLE LOW-9 QUOTATION MARK]
case '\u201B': // ‛ [SINGLE HIGH-REVERSED-9 QUOTATION MARK]
case '\u2032': // ′ [PRIME]
case '\u2035': // ‵ [REVERSED PRIME]
case '\u2039': // ‹ [SINGLE LEFT-POINTING ANGLE QUOTATION MARK]
case '\u203A': // › [SINGLE RIGHT-POINTING ANGLE QUOTATION MARK]
case '\u275B': // ❛ [HEAVY SINGLE TURNED COMMA QUOTATION MARK ORNAMENT]
case '\u275C': // ❜ [HEAVY SINGLE COMMA QUOTATION MARK ORNAMENT]
case '\uFF07': // ' [FULLWIDTH APOSTROPHE]
output[outputPos++] = '\'';
break;
case '\u2010': // ‐ [HYPHEN]
case '\u2011': // ‑ [NON-BREAKING HYPHEN]
case '\u2012': // ‒ [FIGURE DASH]
case '\u2013': // – [EN DASH]
case '\u2014': // — [EM DASH]
case '\u207B': // ⁻ [SUPERSCRIPT MINUS]
case '\u208B': // ₋ [SUBSCRIPT MINUS]
case '\uFF0D': // - [FULLWIDTH HYPHEN-MINUS]
output[outputPos++] = '-';
break;
case '\u2045': // ⁅ [LEFT SQUARE BRACKET WITH QUILL]
case '\u2772': // ❲ [LIGHT LEFT TORTOISE SHELL BRACKET ORNAMENT]
case '\uFF3B': // [ [FULLWIDTH LEFT SQUARE BRACKET]
output[outputPos++] = '[';
break;
case '\u2046': // ⁆ [RIGHT SQUARE BRACKET WITH QUILL]
case '\u2773': // ❳ [LIGHT RIGHT TORTOISE SHELL BRACKET ORNAMENT]
case '\uFF3D': // ] [FULLWIDTH RIGHT SQUARE BRACKET]
output[outputPos++] = ']';
break;
case '\u207D': // ⁽ [SUPERSCRIPT LEFT PARENTHESIS]
case '\u208D': // ₍ [SUBSCRIPT LEFT PARENTHESIS]
case '\u2768': // ❨ [MEDIUM LEFT PARENTHESIS ORNAMENT]
case '\u276A': // ❪ [MEDIUM FLATTENED LEFT PARENTHESIS ORNAMENT]
case '\uFF08': // ( [FULLWIDTH LEFT PARENTHESIS]
output[outputPos++] = '(';
break;
case '\u2E28': // ⸨ [LEFT DOUBLE PARENTHESIS]
output[outputPos++] = '(';
output[outputPos++] = '(';
break;
case '\u207E': // ⁾ [SUPERSCRIPT RIGHT PARENTHESIS]
case '\u208E': // ₎ [SUBSCRIPT RIGHT PARENTHESIS]
case '\u2769': // ❩ [MEDIUM RIGHT PARENTHESIS ORNAMENT]
case '\u276B': // ❫ [MEDIUM FLATTENED RIGHT PARENTHESIS ORNAMENT]
case '\uFF09': // ) [FULLWIDTH RIGHT PARENTHESIS]
output[outputPos++] = ')';
break;
case '\u2E29': // ⸩ [RIGHT DOUBLE PARENTHESIS]
output[outputPos++] = ')';
output[outputPos++] = ')';
break;
case '\u276C': // ❬ [MEDIUM LEFT-POINTING ANGLE BRACKET ORNAMENT]
case '\u2770': // ❰ [HEAVY LEFT-POINTING ANGLE BRACKET ORNAMENT]
case '\uFF1C': // < [FULLWIDTH LESS-THAN SIGN]
output[outputPos++] = '<';
break;
case '\u276D': // ❭ [MEDIUM RIGHT-POINTING ANGLE BRACKET ORNAMENT]
case '\u2771': // ❱ [HEAVY RIGHT-POINTING ANGLE BRACKET ORNAMENT]
case '\uFF1E': // > [FULLWIDTH GREATER-THAN SIGN]
output[outputPos++] = '>';
break;
case '\u2774': // ❴ [MEDIUM LEFT CURLY BRACKET ORNAMENT]
case '\uFF5B': // { [FULLWIDTH LEFT CURLY BRACKET]
output[outputPos++] = '{';
break;
case '\u2775': // ❵ [MEDIUM RIGHT CURLY BRACKET ORNAMENT]
case '\uFF5D': // } [FULLWIDTH RIGHT CURLY BRACKET]
output[outputPos++] = '}';
break;
case '\u207A': // ⁺ [SUPERSCRIPT PLUS SIGN]
case '\u208A': // ₊ [SUBSCRIPT PLUS SIGN]
case '\uFF0B': // + [FULLWIDTH PLUS SIGN]
output[outputPos++] = '+';
break;
case '\u207C': // ⁼ [SUPERSCRIPT EQUALS SIGN]
case '\u208C': // ₌ [SUBSCRIPT EQUALS SIGN]
case '\uFF1D': // = [FULLWIDTH EQUALS SIGN]
output[outputPos++] = '=';
break;
case '\uFF01': // ! [FULLWIDTH EXCLAMATION MARK]
output[outputPos++] = '!';
break;
case '\u203C': // ‼ [DOUBLE EXCLAMATION MARK]
output[outputPos++] = '!';
output[outputPos++] = '!';
break;
case '\u2049': // ⁉ [EXCLAMATION QUESTION MARK]
output[outputPos++] = '!';
output[outputPos++] = '?';
break;
case '\uFF03': // # [FULLWIDTH NUMBER SIGN]
output[outputPos++] = '#';
break;
case '\uFF04': // $ [FULLWIDTH DOLLAR SIGN]
output[outputPos++] = '$';
break;
case '\u2052': // ⁒ [COMMERCIAL MINUS SIGN]
case '\uFF05': // % [FULLWIDTH PERCENT SIGN]
output[outputPos++] = '%';
break;
case '\uFF06': // & [FULLWIDTH AMPERSAND]
output[outputPos++] = '&';
break;
case '\u204E': // ⁎ [LOW ASTERISK]
case '\uFF0A': // * [FULLWIDTH ASTERISK]
output[outputPos++] = '*';
break;
case '\uFF0C': // , [FULLWIDTH COMMA]
output[outputPos++] = ',';
break;
case '\uFF0E': // . [FULLWIDTH FULL STOP]
output[outputPos++] = '.';
break;
case '\u2044': // ⁄ [FRACTION SLASH]
case '\uFF0F': // / [FULLWIDTH SOLIDUS]
output[outputPos++] = '/';
break;
case '\uFF1A': // : [FULLWIDTH COLON]
output[outputPos++] = ':';
break;
case '\u204F': // ⁏ [REVERSED SEMICOLON]
case '\uFF1B': // ; [FULLWIDTH SEMICOLON]
output[outputPos++] = ';';
break;
case '\uFF1F': // ? [FULLWIDTH QUESTION MARK]
output[outputPos++] = '?';
break;
case '\u2047': // ⁇ [DOUBLE QUESTION MARK]
output[outputPos++] = '?';
output[outputPos++] = '?';
break;
case '\u2048': // ⁈ [QUESTION EXCLAMATION MARK]
output[outputPos++] = '?';
output[outputPos++] = '!';
break;
case '\uFF20': // @ [FULLWIDTH COMMERCIAL AT]
output[outputPos++] = '@';
break;
case '\uFF3C': // \ [FULLWIDTH REVERSE SOLIDUS]
output[outputPos++] = '\\';
break;
case '\u2038': // ‸ [CARET]
case '\uFF3E': // ^ [FULLWIDTH CIRCUMFLEX ACCENT]
output[outputPos++] = '^';
break;
case '\uFF3F': // _ [FULLWIDTH LOW LINE]
output[outputPos++] = '_';
break;
case '\u2053': // ⁓ [SWUNG DASH]
case '\uFF5E': // ~ [FULLWIDTH TILDE]
output[outputPos++] = '~';
break;
default:
output[outputPos++] = c;
break;
}
}
}
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/ASCIIFoldingFilter.java | Java | art | 112,903 |
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.Reader;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.AttributeSource;
/** An abstract base class for simple, character-oriented tokenizers.*/
public abstract class CharTokenizer extends Tokenizer {
  /**
   * Creates a new CharTokenizer that reads characters from {@code input},
   * using the default attribute factory inherited from {@link Tokenizer}.
   *
   * @param input the character stream to tokenize
   */
  public CharTokenizer(Reader input) {
    super(input);
    // Register (or fetch, if already present) the attributes this tokenizer
    // fills in for every token: character offsets and the term text.
    offsetAtt = addAttribute(OffsetAttribute.class);
    termAtt = addAttribute(TermAttribute.class);
  }
public CharTokenizer(AttributeSource source, Reader input) {
super(source, input);
offsetAtt = addAttribute(OffsetAttribute.class);
termAtt = addAttribute(TermAttribute.class);
}
public CharTokenizer(AttributeFactory factory, Reader input) {
super(factory, input);
offsetAtt = addAttribute(OffsetAttribute.class);
termAtt = addAttribute(TermAttribute.class);
}
private int offset = 0, bufferIndex = 0, dataLen = 0;
private static final int MAX_WORD_LEN = 255;
private static final int IO_BUFFER_SIZE = 4096;
private final char[] ioBuffer = new char[IO_BUFFER_SIZE];
private TermAttribute termAtt;
private OffsetAttribute offsetAtt;
/** Returns true iff a character should be included in a token. This
* tokenizer generates as tokens adjacent sequences of characters which
* satisfy this predicate. Characters for which this is false are used to
* define token boundaries and are not included in tokens. */
protected abstract boolean isTokenChar(char c);
/** Called on each token character to normalize it before it is added to the
* token. The default implementation does nothing. Subclasses may use this
* to, e.g., lowercase tokens. */
protected char normalize(char c) {
return c;
}
@Override
public final boolean incrementToken() throws IOException {
clearAttributes();
int length = 0;
int start = bufferIndex;
char[] buffer = termAtt.termBuffer();
while (true) {
if (bufferIndex >= dataLen) {
offset += dataLen;
dataLen = input.read(ioBuffer);
if (dataLen == -1) {
dataLen = 0; // so next offset += dataLen won't decrement offset
if (length > 0)
break;
else
return false;
}
bufferIndex = 0;
}
final char c = ioBuffer[bufferIndex++];
if (isTokenChar(c)) { // if it's a token char
if (length == 0) // start of token
start = offset + bufferIndex - 1;
else if (length == buffer.length)
buffer = termAtt.resizeTermBuffer(1+length);
buffer[length++] = normalize(c); // buffer it, normalized
if (length == MAX_WORD_LEN) // buffer overflow!
break;
} else if (length > 0) // at non-Letter w/ chars
break; // return 'em
}
termAtt.setTermLength(length);
offsetAtt.setOffset(correctOffset(start), correctOffset(start+length));
return true;
}
@Override
public final void end() {
// set final offset
int finalOffset = correctOffset(offset);
offsetAtt.setOffset(finalOffset, finalOffset);
}
@Override
public void reset(Reader input) throws IOException {
super.reset(input);
bufferIndex = 0;
offset = 0;
dataLen = 0;
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/CharTokenizer.java | Java | art | 4,249 |
package org.apache.lucene.analysis;
import java.util.AbstractSet;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.Set;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A simple class that stores Strings as char[]'s in a
* hash table. Note that this is not a general purpose
* class. For example, it cannot remove items from the
* set, nor does it resize its hash table to be smaller,
* etc. It is designed to be quick to test if a char[]
* is in the set without the necessity of converting it
* to a String first.
* <P>
* <em>Please note:</em> This class implements {@link java.util.Set Set} but
* does not behave like it should in all cases. The generic type is
* {@code Set<Object>}, because you can add any object to it,
* that has a string representation. The add methods will use
* {@link Object#toString} and store the result using a {@code char[]}
 * buffer. The {@code contains()} methods behave the same way.
* The {@link #iterator()} returns an {@code Iterator<String>}.
* For type safety also {@link #stringIterator()} is provided.
*/
public class CharArraySet extends AbstractSet<Object> {
  // Initial table capacity. The capacity is always a power of two so a hash
  // code can be mapped to a slot with "code & (entries.length-1)".
  private final static int INIT_SIZE = 8;
  // Open-addressing hash table of stored terms; a null slot is empty.
  // The set supports no removal, so no tombstone handling is needed.
  private char[][] entries;
  // Number of terms currently stored in the set.
  private int count;
  // When true, terms are lowercased in add() (in place!) and lookup text is
  // lowercased on the fly, giving case-insensitive matching.
  private final boolean ignoreCase;
  /** An immutable, shareable empty set. */
  public static final CharArraySet EMPTY_SET = CharArraySet.unmodifiableSet(new CharArraySet(0, false));
  /** Create set with enough capacity to hold startSize
   * terms */
  public CharArraySet(int startSize, boolean ignoreCase) {
    this.ignoreCase = ignoreCase;
    int size = INIT_SIZE;
    // grow the (power-of-two) capacity until startSize fits below the rehash
    // threshold, so no rehash happens while filling the set to startSize
    while(startSize + (startSize>>2) > size)
      size <<= 1;
    entries = new char[size][];
  }
  /** Create set from a Collection of char[] or String */
  public CharArraySet(Collection<? extends Object> c, boolean ignoreCase) {
    this(c.size(), ignoreCase);
    addAll(c);
  }
  /** Create set from entries; shares (does not copy) the given table.
   * Used by {@link #unmodifiableSet} to build a read-only view. */
  private CharArraySet(char[][] entries, boolean ignoreCase, int count){
    this.entries = entries;
    this.ignoreCase = ignoreCase;
    this.count = count;
  }
  /** true if the <code>len</code> chars of <code>text</code> starting at <code>off</code>
   * are in the set */
  public boolean contains(char[] text, int off, int len) {
    return entries[getSlot(text, off, len)] != null;
  }
  /** true if the <code>CharSequence</code> is in the set */
  public boolean contains(CharSequence cs) {
    return entries[getSlot(cs)] != null;
  }
  /** Returns the table slot for the given text: either the slot holding an
   * equal term, or the empty slot where it would be inserted. Collisions are
   * resolved by double hashing; the increment is forced odd so it is co-prime
   * with the power-of-two table length and eventually visits every slot. */
  private int getSlot(char[] text, int off, int len) {
    int code = getHashCode(text, off, len);
    int pos = code & (entries.length-1);
    char[] text2 = entries[pos];
    if (text2 != null && !equals(text, off, len, text2)) {
      final int inc = ((code>>8)+code)|1;
      do {
        code += inc;
        pos = code & (entries.length-1);
        text2 = entries[pos];
      } while (text2 != null && !equals(text, off, len, text2));
    }
    return pos;
  }
  /** Returns the table slot for the given CharSequence; same probing scheme
   * as {@link #getSlot(char[], int, int)}. */
  private int getSlot(CharSequence text) {
    int code = getHashCode(text);
    int pos = code & (entries.length-1);
    char[] text2 = entries[pos];
    if (text2 != null && !equals(text, text2)) {
      final int inc = ((code>>8)+code)|1;
      do {
        code += inc;
        pos = code & (entries.length-1);
        text2 = entries[pos];
      } while (text2 != null && !equals(text, text2));
    }
    return pos;
  }
  /** Add this CharSequence into the set */
  public boolean add(CharSequence text) {
    return add(text.toString()); // could be more efficient
  }
  /** Add this String into the set */
  public boolean add(String text) {
    return add(text.toCharArray());
  }
  /** Add this char[] directly to the set.
   * If ignoreCase is true for this Set, the text array will be directly modified.
   * The user should never modify this text array after calling this method.
   */
  public boolean add(char[] text) {
    if (ignoreCase)
      for(int i=0;i<text.length;i++)
        text[i] = Character.toLowerCase(text[i]);
    int slot = getSlot(text, 0, text.length);
    if (entries[slot] != null) return false; // term already present
    entries[slot] = text;
    count++;
    // rehash once the load factor exceeds ~80% (count * 1.25 > capacity)
    if (count + (count>>2) > entries.length) {
      rehash();
    }
    return true;
  }
  /** Compares the len chars of text1 starting at off with the stored term
   * text2. When ignoreCase is set, only text1 is lowercased on the fly:
   * stored terms were already lowercased by add(). */
  private boolean equals(char[] text1, int off, int len, char[] text2) {
    if (len != text2.length)
      return false;
    if (ignoreCase) {
      for(int i=0;i<len;i++) {
        if (Character.toLowerCase(text1[off+i]) != text2[i])
          return false;
      }
    } else {
      for(int i=0;i<len;i++) {
        if (text1[off+i] != text2[i])
          return false;
      }
    }
    return true;
  }
  /** CharSequence variant of {@link #equals(char[], int, int, char[])}. */
  private boolean equals(CharSequence text1, char[] text2) {
    int len = text1.length();
    if (len != text2.length)
      return false;
    if (ignoreCase) {
      for(int i=0;i<len;i++) {
        if (Character.toLowerCase(text1.charAt(i)) != text2[i])
          return false;
      }
    } else {
      for(int i=0;i<len;i++) {
        if (text1.charAt(i) != text2[i])
          return false;
      }
    }
    return true;
  }
  /** Doubles the table capacity and reinserts every stored term. */
  private void rehash() {
    final int newSize = 2*entries.length;
    char[][] oldEntries = entries;
    entries = new char[newSize][];
    for(int i=0;i<oldEntries.length;i++) {
      char[] text = oldEntries[i];
      if (text != null) {
        // todo: could be faster... no need to compare strings on collision
        entries[getSlot(text,0,text.length)] = text;
      }
    }
  }
  /** Hash of the len chars starting at offset, lowercasing on the fly when
   * ignoreCase is set (same 31-based scheme as String.hashCode). */
  private int getHashCode(char[] text, int offset, int len) {
    int code = 0;
    final int stop = offset + len;
    if (ignoreCase) {
      for (int i=offset; i<stop; i++) {
        code = code*31 + Character.toLowerCase(text[i]);
      }
    } else {
      for (int i=offset; i<stop; i++) {
        code = code*31 + text[i];
      }
    }
    return code;
  }
  /** CharSequence variant of {@link #getHashCode(char[], int, int)}. */
  private int getHashCode(CharSequence text) {
    int code = 0;
    int len = text.length();
    if (ignoreCase) {
      for (int i=0; i<len; i++) {
        code = code*31 + Character.toLowerCase(text.charAt(i));
      }
    } else {
      for (int i=0; i<len; i++) {
        code = code*31 + text.charAt(i);
      }
    }
    return code;
  }
  @Override
  public int size() {
    return count;
  }
  @Override
  public boolean isEmpty() {
    return count==0;
  }
  /** Dispatches to the char[] overload when possible, otherwise falls back
   * to the object's string representation. */
  @Override
  public boolean contains(Object o) {
    if (o instanceof char[]) {
      final char[] text = (char[])o;
      return contains(text, 0, text.length);
    }
    return contains(o.toString());
  }
  /** Dispatches to the char[] overload when possible, otherwise stores the
   * object's string representation. */
  @Override
  public boolean add(Object o) {
    if (o instanceof char[]) {
      return add((char[])o);
    }
    return add(o.toString());
  }
  /**
   * Returns an unmodifiable {@link CharArraySet}. This allows to provide
   * unmodifiable views of internal sets for "read-only" use.
   *
   * @param set
   *          a set for which the unmodifiable set is returned.
   * @return an new unmodifiable {@link CharArraySet}.
   * @throws NullPointerException
   *           if the given set is <code>null</code>.
   */
  public static CharArraySet unmodifiableSet(CharArraySet set) {
    if (set == null)
      throw new NullPointerException("Given set is null");
    if (set == EMPTY_SET)
      return EMPTY_SET;
    if (set instanceof UnmodifiableCharArraySet)
      return set;
    /*
     * Instead of delegating calls to the given set copy the low-level values to
     * the unmodifiable Subclass
     */
    return new UnmodifiableCharArraySet(set.entries, set.ignoreCase, set.count);
  }
  /**
   * Returns a copy of the given set as a {@link CharArraySet}. If the given set
   * is a {@link CharArraySet} the ignoreCase property will be preserved.
   *
   * @param set
   *          a set to copy
   * @return a copy of the given set as a {@link CharArraySet}. If the given set
   *         is a {@link CharArraySet} the ignoreCase property will be
   *         preserved.
   */
  public static CharArraySet copy(Set<?> set) {
    if (set == null)
      throw new NullPointerException("Given set is null");
    if(set == EMPTY_SET)
      return EMPTY_SET;
    final boolean ignoreCase = set instanceof CharArraySet ? ((CharArraySet) set).ignoreCase
        : false;
    return new CharArraySet(set, ignoreCase);
  }
  /** The Iterator<String> for this set. Strings are constructed on the fly, so
   * use <code>nextCharArray</code> for more efficient access. */
  public class CharArraySetIterator implements Iterator<String> {
    // index of the slot that produced the current `next`; -1 before first scan
    int pos=-1;
    // next non-null entry to hand out, or null when the table is exhausted
    char[] next;
    CharArraySetIterator() {
      goNext();
    }
    /** Advances pos past empty slots to the next stored term (or null at end). */
    private void goNext() {
      next = null;
      pos++;
      while (pos < entries.length && (next=entries[pos]) == null) pos++;
    }
    public boolean hasNext() {
      return next != null;
    }
    /** do not modify the returned char[] */
    public char[] nextCharArray() {
      char[] ret = next;
      goNext();
      return ret;
    }
    /** Returns the next String, as a Set<String> would...
     * use nextCharArray() for better efficiency. */
    public String next() {
      return new String(nextCharArray());
    }
    /** Removal is not supported by CharArraySet. */
    public void remove() {
      throw new UnsupportedOperationException();
    }
  }
  /** returns an iterator of new allocated Strings */
  public Iterator<String> stringIterator() {
    return new CharArraySetIterator();
  }
  /** returns an iterator of new allocated Strings, this method violates the Set interface */
  @Override
  @SuppressWarnings("unchecked")
  public Iterator<Object> iterator() {
    return (Iterator) stringIterator();
  }
  /**
   * Efficient unmodifiable {@link CharArraySet}. This implementation does not
   * delegate calls to a give {@link CharArraySet} like
   * {@link Collections#unmodifiableSet(java.util.Set)} does. Instead is passes
   * the internal representation of a {@link CharArraySet} to a super
   * constructor and overrides all mutators.
   */
  private static final class UnmodifiableCharArraySet extends CharArraySet {
    private UnmodifiableCharArraySet(char[][] entries, boolean ignoreCase,
        int count) {
      super(entries, ignoreCase, count);
    }
    @Override
    public boolean add(Object o){
      throw new UnsupportedOperationException();
    }
    @Override
    public boolean addAll(Collection<? extends Object> coll) {
      throw new UnsupportedOperationException();
    }
    @Override
    public boolean add(char[] text) {
      throw new UnsupportedOperationException();
    }
    @Override
    public boolean add(CharSequence text) {
      throw new UnsupportedOperationException();
    }
    @Override
    public boolean add(String text) {
      throw new UnsupportedOperationException();
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/CharArraySet.java | Java | art | 11,486 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.analysis;
import java.util.HashMap;
import java.util.Map;
/**
* Holds a map of String input to String output, to be used
* with {@link MappingCharFilter}.
*/
public class NormalizeCharMap {
  // Trie children keyed by the next character of a match; built lazily.
  Map<Character, NormalizeCharMap> submap;
  // Replacement string, recorded at the node where a complete match ends
  // (null while no mapping terminates at this node).
  String normStr;
  // Length difference between the matched input and its replacement.
  int diff;
  /** Records a replacement to be applied to the inputs
   * stream. Whenever <code>singleMatch</code> occurs in
   * the input, it will be replaced with
   * <code>replacement</code>.
   *
   * @param singleMatch input String to be replaced
   * @param replacement output String
   */
  public void add(String singleMatch, String replacement) {
    // walk (and extend) the trie, one node per character of the match
    NormalizeCharMap node = this;
    for (final char ch : singleMatch.toCharArray()) {
      if (node.submap == null) {
        node.submap = new HashMap<Character, NormalizeCharMap>(1);
      }
      NormalizeCharMap child = node.submap.get(Character.valueOf(ch));
      if (child == null) {
        child = new NormalizeCharMap();
        node.submap.put(Character.valueOf(ch), child);
      }
      node = child;
    }
    // a second mapping for the same input is a caller error
    if (node.normStr != null) {
      throw new RuntimeException("MappingCharFilter: there is already a mapping for " + singleMatch);
    }
    node.normStr = replacement;
    node.diff = singleMatch.length() - replacement.length();
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/NormalizeCharMap.java | Java | art | 2,163 |
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.Reader;
import java.io.IOException;
/** An {@link Analyzer} that filters {@link LetterTokenizer}
* with {@link LowerCaseFilter} */
public final class SimpleAnalyzer extends Analyzer {
  /** Builds a fresh {@link LowerCaseTokenizer} over the given reader. */
  @Override
  public TokenStream tokenStream(String fieldName, Reader reader) {
    return new LowerCaseTokenizer(reader);
  }
  /** Reuses the per-thread tokenizer when one exists, otherwise creates
   * and caches a new {@link LowerCaseTokenizer}. */
  @Override
  public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException {
    final Tokenizer previous = (Tokenizer) getPreviousTokenStream();
    if (previous != null) {
      previous.reset(reader);
      return previous;
    }
    final Tokenizer created = new LowerCaseTokenizer(reader);
    setPreviousTokenStream(created);
    return created;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/SimpleAnalyzer.java | Java | art | 1,534 |
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.lang.ref.WeakReference;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import org.apache.lucene.util.AttributeImpl;
import org.apache.lucene.util.AttributeSource;
/**
* This TokenFilter provides the ability to set aside attribute states
* that have already been analyzed. This is useful in situations where multiple fields share
* many common analysis steps and then go their separate ways.
* <p/>
* It is also useful for doing things like entity extraction or proper noun analysis as
* part of the analysis workflow and saving off those tokens for use in another field.
*
* <pre>
TeeSinkTokenFilter source1 = new TeeSinkTokenFilter(new WhitespaceTokenizer(reader1));
TeeSinkTokenFilter.SinkTokenStream sink1 = source1.newSinkTokenStream();
TeeSinkTokenFilter.SinkTokenStream sink2 = source1.newSinkTokenStream();
TeeSinkTokenFilter source2 = new TeeSinkTokenFilter(new WhitespaceTokenizer(reader2));
source2.addSinkTokenStream(sink1);
source2.addSinkTokenStream(sink2);
TokenStream final1 = new LowerCaseFilter(source1);
TokenStream final2 = source2;
TokenStream final3 = new EntityDetect(sink1);
TokenStream final4 = new URLDetect(sink2);
d.add(new Field("f1", final1));
d.add(new Field("f2", final2));
d.add(new Field("f3", final3));
d.add(new Field("f4", final4));
* </pre>
* In this example, <code>sink1</code> and <code>sink2</code> will both get tokens from both
* <code>reader1</code> and <code>reader2</code> after whitespace tokenizer
* and now we can further wrap any of these in extra analysis, and more "sources" can be inserted if desired.
 * It is important that tees are consumed before sinks (in the above example, the tee's field names must be
 * less than the sink's field names). If you are not sure which stream is consumed first, you can simply
* add another sink and then pass all tokens to the sinks at once using {@link #consumeAllTokens}.
 * This TokenFilter is exhausted after that. To do so, change
 * the example above to:
* <pre>
...
TokenStream final1 = new LowerCaseFilter(source1.newSinkTokenStream());
TokenStream final2 = source2.newSinkTokenStream();
sink1.consumeAllTokens();
sink2.consumeAllTokens();
...
* </pre>
* In this case, the fields can be added in any order, because the sources are not used anymore and all sinks are ready.
* <p>Note, the EntityDetect and URLDetect TokenStreams are for the example and do not currently exist in Lucene.
*/
public final class TeeSinkTokenFilter extends TokenFilter {
  /**
   * Registered sinks, held weakly so that a sink no longer referenced
   * elsewhere can be garbage collected. Entries whose referent has been
   * collected are purged lazily while iterating (see incrementToken/end);
   * without the purge, the list would grow without bound when many
   * short-lived sinks are created from a long-lived tee.
   */
  private final List<WeakReference<SinkTokenStream>> sinks = new LinkedList<WeakReference<SinkTokenStream>>();
  /**
   * Instantiates a new TeeSinkTokenFilter.
   *
   * @param input the stream whose tokens are passed through and copied to the sinks
   */
  public TeeSinkTokenFilter(TokenStream input) {
    super(input);
  }
  /**
   * Returns a new {@link SinkTokenStream} that receives all tokens consumed by this stream.
   */
  public SinkTokenStream newSinkTokenStream() {
    return newSinkTokenStream(ACCEPT_ALL_FILTER);
  }
  /**
   * Returns a new {@link SinkTokenStream} that receives all tokens consumed by this stream
   * that pass the supplied filter.
   *
   * @param filter decides which token states are stored in the sink
   * @see SinkFilter
   */
  public SinkTokenStream newSinkTokenStream(SinkFilter filter) {
    SinkTokenStream sink = new SinkTokenStream(this.cloneAttributes(), filter);
    this.sinks.add(new WeakReference<SinkTokenStream>(sink));
    return sink;
  }
  /**
   * Adds a {@link SinkTokenStream} created by another <code>TeeSinkTokenFilter</code>
   * to this one. The supplied stream will also receive all consumed tokens.
   * This method can be used to pass tokens from two different tees to one sink.
   *
   * @throws IllegalArgumentException if the sink was created with a different attribute factory
   */
  public void addSinkTokenStream(final SinkTokenStream sink) {
    // check that sink has correct factory
    if (!this.getAttributeFactory().equals(sink.getAttributeFactory())) {
      throw new IllegalArgumentException("The supplied sink is not compatible to this tee");
    }
    // add eventually missing attribute impls to the existing sink
    for (Iterator<AttributeImpl> it = this.cloneAttributes().getAttributeImplsIterator(); it.hasNext(); ) {
      sink.addAttributeImpl(it.next());
    }
    this.sinks.add(new WeakReference<SinkTokenStream>(sink));
  }
  /**
   * <code>TeeSinkTokenFilter</code> passes all tokens to the added sinks
   * when itself is consumed. To be sure that all tokens from the input
   * stream are passed to the sinks, you can call this method.
   * This instance is exhausted after this, but all sinks are instantly available.
   */
  public void consumeAllTokens() throws IOException {
    while (incrementToken());
  }
  @Override
  public boolean incrementToken() throws IOException {
    if (input.incrementToken()) {
      // capture state lazily - maybe no SinkFilter accepts this state
      AttributeSource.State state = null;
      // iterate with an explicit Iterator so entries whose sink has been
      // garbage collected can be removed in place (fixes unbounded growth
      // of the sink list with many short-lived sinks)
      for (Iterator<WeakReference<SinkTokenStream>> it = sinks.iterator(); it.hasNext(); ) {
        final SinkTokenStream sink = it.next().get();
        if (sink == null) {
          it.remove(); // referent collected; drop the stale entry
          continue;
        }
        if (sink.accept(this)) {
          if (state == null) {
            state = this.captureState();
          }
          sink.addState(state);
        }
      }
      return true;
    }
    return false;
  }
  @Override
  public final void end() throws IOException {
    super.end();
    AttributeSource.State finalState = captureState();
    for (Iterator<WeakReference<SinkTokenStream>> it = sinks.iterator(); it.hasNext(); ) {
      final SinkTokenStream sink = it.next().get();
      if (sink == null) {
        it.remove(); // purge collected sinks here as well
        continue;
      }
      sink.setFinalState(finalState);
    }
  }
  /**
   * A filter that decides which {@link AttributeSource} states to store in the sink.
   */
  public static abstract class SinkFilter {
    /**
     * Returns true, iff the current state of the passed-in {@link AttributeSource} shall be stored
     * in the sink.
     */
    public abstract boolean accept(AttributeSource source);
    /**
     * Called by {@link SinkTokenStream#reset()}. This method does nothing by default
     * and can optionally be overridden.
     */
    public void reset() throws IOException {
      // nothing to do; can be overridden
    }
  }
  /** A sink stream that replays the attribute states captured by its tee(s). */
  public static final class SinkTokenStream extends TokenStream {
    // states captured by the tee, replayed in order by incrementToken()
    private final List<AttributeSource.State> cachedStates = new LinkedList<AttributeSource.State>();
    // state captured by the tee's end(), replayed by this stream's end()
    private AttributeSource.State finalState;
    // non-null once consumption has started; guards against late addState()
    private Iterator<AttributeSource.State> it = null;
    private final SinkFilter filter;
    private SinkTokenStream(AttributeSource source, SinkFilter filter) {
      super(source);
      this.filter = filter;
    }
    private boolean accept(AttributeSource source) {
      return filter.accept(source);
    }
    private void addState(AttributeSource.State state) {
      if (it != null) {
        throw new IllegalStateException("The tee must be consumed before sinks are consumed.");
      }
      cachedStates.add(state);
    }
    private void setFinalState(AttributeSource.State finalState) {
      this.finalState = finalState;
    }
    @Override
    public final boolean incrementToken() throws IOException {
      // lazy init the iterator
      if (it == null) {
        it = cachedStates.iterator();
      }
      if (!it.hasNext()) {
        return false;
      }
      AttributeSource.State state = it.next();
      restoreState(state);
      return true;
    }
    @Override
    public final void end() throws IOException {
      if (finalState != null) {
        restoreState(finalState);
      }
    }
    @Override
    public final void reset() {
      it = cachedStates.iterator();
    }
  }
  // default filter: every consumed token state is stored in the sink
  private static final SinkFilter ACCEPT_ALL_FILTER = new SinkFilter() {
    @Override
    public boolean accept(AttributeSource source) {
      return true;
    }
  };
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/TeeSinkTokenFilter.java | Java | art | 8,598 |
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.Reader;
/**
* "Tokenizes" the entire stream as a single token. This is useful
* for data like zip codes, ids, and some product names.
*/
public class KeywordAnalyzer extends Analyzer {
public KeywordAnalyzer() {
setOverridesTokenStreamMethod(KeywordAnalyzer.class);
}
@Override
public TokenStream tokenStream(String fieldName,
final Reader reader) {
return new KeywordTokenizer(reader);
}
@Override
public TokenStream reusableTokenStream(String fieldName,
final Reader reader) throws IOException {
if (overridesTokenStreamMethod) {
// LUCENE-1678: force fallback to tokenStream() if we
// have been subclassed and that subclass overrides
// tokenStream but not reusableTokenStream
return tokenStream(fieldName, reader);
}
Tokenizer tokenizer = (Tokenizer) getPreviousTokenStream();
if (tokenizer == null) {
tokenizer = new KeywordTokenizer(reader);
setPreviousTokenStream(tokenizer);
} else
tokenizer.reset(reader);
return tokenizer;
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/KeywordAnalyzer.java | Java | art | 1,990 |
<!doctype html public "-//w3c//dtd html 4.0 transitional//en">
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
</head>
<body>
<p>API and code to convert text into indexable/searchable tokens. Covers {@link org.apache.lucene.analysis.Analyzer} and related classes.</p>
<h2>Parsing? Tokenization? Analysis!</h2>
<p>
Lucene, an indexing and search library, accepts only plain text input.
<p>
<h2>Parsing</h2>
<p>
Applications that build their search capabilities upon Lucene may support documents in various formats – HTML, XML, PDF, Word – just to name a few.
Lucene does not care about the <i>Parsing</i> of these and other document formats, and it is the responsibility of the
application using Lucene to use an appropriate <i>Parser</i> to convert the original format into plain text before passing that plain text to Lucene.
<p>
<h2>Tokenization</h2>
<p>
Plain text passed to Lucene for indexing goes through a process generally called tokenization. Tokenization is the process
of breaking input text into small indexing elements – tokens.
The way input text is broken into tokens heavily influences how people will then be able to search for that text.
For instance, sentences beginnings and endings can be identified to provide for more accurate phrase
and proximity searches (though sentence identification is not provided by Lucene).
<p>
In some cases simply breaking the input text into tokens is not enough – a deeper <i>Analysis</i> may be needed.
There are many post tokenization steps that can be done, including (but not limited to):
<ul>
<li><a href="http://en.wikipedia.org/wiki/Stemming">Stemming</a> –
Replacing of words by their stems.
For instance with English stemming "bikes" is replaced by "bike";
now query "bike" can find both documents containing "bike" and those containing "bikes".
</li>
<li><a href="http://en.wikipedia.org/wiki/Stop_words">Stop Words Filtering</a> –
Common words like "the", "and" and "a" rarely add any value to a search.
Removing them shrinks the index size and increases performance.
It may also reduce some "noise" and actually improve search quality.
</li>
<li><a href="http://en.wikipedia.org/wiki/Text_normalization">Text Normalization</a> –
Stripping accents and other character markings can make for better searching.
</li>
<li><a href="http://en.wikipedia.org/wiki/Synonym">Synonym Expansion</a> –
Adding in synonyms at the same token position as the current word can mean better
matching when users search with words in the synonym set.
</li>
</ul>
<p>
<h2>Core Analysis</h2>
<p>
The analysis package provides the mechanism to convert Strings and Readers into tokens that can be indexed by Lucene. There
are three main classes in the package from which all analysis processes are derived. These are:
<ul>
<li>{@link org.apache.lucene.analysis.Analyzer} – An Analyzer is responsible for building a {@link org.apache.lucene.analysis.TokenStream} which can be consumed
by the indexing and searching processes. See below for more information on implementing your own Analyzer.</li>
<li>{@link org.apache.lucene.analysis.Tokenizer} – A Tokenizer is a {@link org.apache.lucene.analysis.TokenStream} and is responsible for breaking
up incoming text into tokens. In most cases, an Analyzer will use a Tokenizer as the first step in
the analysis process.</li>
<li>{@link org.apache.lucene.analysis.TokenFilter} – A TokenFilter is also a {@link org.apache.lucene.analysis.TokenStream} and is responsible
for modifying tokens that have been created by the Tokenizer. Common modifications performed by a
TokenFilter are: deletion, stemming, synonym injection, and down casing. Not all Analyzers require TokenFilters</li>
</ul>
<b>Lucene 2.9 introduces a new TokenStream API. Please see the section "New TokenStream API" below for more details.</b>
</p>
<h2>Hints, Tips and Traps</h2>
<p>
The synergy between {@link org.apache.lucene.analysis.Analyzer} and {@link org.apache.lucene.analysis.Tokenizer}
is sometimes confusing. To ease on this confusion, some clarifications:
<ul>
<li>The {@link org.apache.lucene.analysis.Analyzer} is responsible for the entire task of
<u>creating</u> tokens out of the input text, while the {@link org.apache.lucene.analysis.Tokenizer}
is only responsible for <u>breaking</u> the input text into tokens. Very likely, tokens created
by the {@link org.apache.lucene.analysis.Tokenizer} would be modified or even omitted
by the {@link org.apache.lucene.analysis.Analyzer} (via one or more
{@link org.apache.lucene.analysis.TokenFilter}s) before being returned.
</li>
<li>{@link org.apache.lucene.analysis.Tokenizer} is a {@link org.apache.lucene.analysis.TokenStream},
but {@link org.apache.lucene.analysis.Analyzer} is not.
</li>
<li>{@link org.apache.lucene.analysis.Analyzer} is "field aware", but
{@link org.apache.lucene.analysis.Tokenizer} is not.
</li>
</ul>
</p>
<p>
Lucene Java provides a number of analysis capabilities, the most commonly used one being the {@link
org.apache.lucene.analysis.standard.StandardAnalyzer}. Many applications will have a long and industrious life with nothing more
than the StandardAnalyzer. However, there are a few other classes/packages that are worth mentioning:
<ol>
<li>{@link org.apache.lucene.analysis.PerFieldAnalyzerWrapper} – Most Analyzers perform the same operation on all
{@link org.apache.lucene.document.Field}s. The PerFieldAnalyzerWrapper can be used to associate a different Analyzer with different
{@link org.apache.lucene.document.Field}s.</li>
<li>The contrib/analyzers library located at the root of the Lucene distribution has a number of different Analyzer implementations to solve a variety
of different problems related to searching. Many of the Analyzers are designed to analyze non-English languages.</li>
<li>The contrib/snowball library
located at the root of the Lucene distribution has Analyzer and TokenFilter
implementations for a variety of Snowball stemmers.
See <a href="http://snowball.tartarus.org">http://snowball.tartarus.org</a>
for more information on Snowball stemmers.</li>
<li>There are a variety of Tokenizer and TokenFilter implementations in this package. Take a look around, chances are someone has implemented what you need.</li>
</ol>
</p>
<p>
Analysis is one of the main causes of performance degradation during indexing. Simply put, the more you analyze the slower the indexing (in most cases).
Perhaps your application would be just fine using the simple {@link org.apache.lucene.analysis.WhitespaceTokenizer} combined with a
{@link org.apache.lucene.analysis.StopFilter}. The contrib/benchmark library can be useful for testing out the speed of the analysis process.
</p>
<h2>Invoking the Analyzer</h2>
<p>
Applications usually do not invoke analysis – Lucene does it for them:
<ul>
<li>At indexing, as a consequence of
{@link org.apache.lucene.index.IndexWriter#addDocument(org.apache.lucene.document.Document) addDocument(doc)},
the Analyzer in effect for indexing is invoked for each indexed field of the added document.
</li>
<li>At search, as a consequence of
{@link org.apache.lucene.queryParser.QueryParser#parse(java.lang.String) QueryParser.parse(queryText)},
the QueryParser may invoke the Analyzer in effect.
Note that for some queries analysis does not take place, e.g. wildcard queries.
</li>
</ul>
However an application might invoke Analysis of any text for testing or for any other purpose, something like:
<PRE>
Analyzer analyzer = new StandardAnalyzer(); // or any other analyzer
TokenStream ts = analyzer.tokenStream("myfield",new StringReader("some text goes here"));
while (ts.incrementToken()) {
System.out.println("token: "+ts);
}
</PRE>
</p>
<h2>Indexing Analysis vs. Search Analysis</h2>
<p>
Selecting the "correct" analyzer is crucial
for search quality, and can also affect indexing and search performance.
The "correct" analyzer differs between applications.
Lucene java's wiki page
<a href="http://wiki.apache.org/lucene-java/AnalysisParalysis">AnalysisParalysis</a>
provides some data on "analyzing your analyzer".
Here are some rules of thumb:
<ol>
<li>Test test test... (did we say test?)</li>
<li>Beware of over analysis – might hurt indexing performance.</li>
<li>Start with same analyzer for indexing and search, otherwise searches would not find what they are supposed to...</li>
<li>In some cases a different analyzer is required for indexing and search, for instance:
<ul>
<li>Certain searches require more stop words to be filtered. (I.e. more than those that were filtered at indexing.)</li>
<li>Query expansion by synonyms, acronyms, auto spell correction, etc.</li>
</ul>
This might sometimes require a modified analyzer – see the next section on how to do that.
</li>
</ol>
</p>
<h2>Implementing your own Analyzer</h2>
<p>Creating your own Analyzer is straightforward. It usually involves either wrapping an existing Tokenizer and set of TokenFilters to create a new Analyzer
or creating both the Analyzer and a Tokenizer or TokenFilter. Before pursuing this approach, you may find it worthwhile
to explore the contrib/analyzers library and/or ask on the java-user@lucene.apache.org mailing list first to see if what you need already exists.
If you are still committed to creating your own Analyzer or TokenStream derivation (Tokenizer or TokenFilter) have a look at
the source code of any one of the many samples located in this package.
</p>
<p>
The following sections discuss some aspects of implementing your own analyzer.
</p>
<h3>Field Section Boundaries</h3>
<p>
When {@link org.apache.lucene.document.Document#add(org.apache.lucene.document.Fieldable) document.add(field)}
is called multiple times for the same field name, we could say that each such call creates a new
section for that field in that document.
In fact, a separate call to
{@link org.apache.lucene.analysis.Analyzer#tokenStream(java.lang.String, java.io.Reader) tokenStream(field,reader)}
would take place for each of these so called "sections".
However, the default Analyzer behavior is to treat all these sections as one large section.
This allows phrase search and proximity search to seamlessly cross
boundaries between these "sections".
In other words, if a certain field "f" is added like this:
<PRE>
document.add(new Field("f","first ends",...));
document.add(new Field("f","starts two",...));
indexWriter.addDocument(document);
</PRE>
Then, a phrase search for "ends starts" would find that document.
Where desired, this behavior can be modified by introducing a "position gap" between consecutive field "sections",
simply by overriding
{@link org.apache.lucene.analysis.Analyzer#getPositionIncrementGap(java.lang.String) Analyzer.getPositionIncrementGap(fieldName)}:
<PRE>
Analyzer myAnalyzer = new StandardAnalyzer() {
public int getPositionIncrementGap(String fieldName) {
return 10;
}
};
</PRE>
</p>
<h3>Token Position Increments</h3>
<p>
By default, all tokens created by Analyzers and Tokenizers have a
{@link org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute#getPositionIncrement() position increment} of one.
This means that the position stored for that token in the index would be one more than
that of the previous token.
Recall that phrase and proximity searches rely on position info.
</p>
<p>
If the selected analyzer filters the stop words "is" and "the", then for a document
containing the string "blue is the sky", only the tokens "blue", "sky" are indexed,
with position("sky") = 1 + position("blue"). Now, a phrase query "blue is the sky"
would find that document, because the same analyzer filters the same stop words from
that query. But also the phrase query "blue sky" would find that document.
</p>
<p>
If this behavior does not fit the application needs,
a modified analyzer can be used, that would increment further the positions of
tokens following a removed stop word, using
{@link org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute#setPositionIncrement(int)}.
This can be done with something like:
<PRE>
public TokenStream tokenStream(final String fieldName, Reader reader) {
final TokenStream ts = someAnalyzer.tokenStream(fieldName, reader);
TokenStream res = new TokenStream() {
TermAttribute termAtt = addAttribute(TermAttribute.class);
PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
public boolean incrementToken() throws IOException {
int extraIncrement = 0;
while (true) {
boolean hasNext = ts.incrementToken();
if (hasNext) {
if (stopWords.contains(termAtt.term())) {
extraIncrement++; // filter this word
continue;
}
if (extraIncrement>0) {
posIncrAtt.setPositionIncrement(posIncrAtt.getPositionIncrement()+extraIncrement);
}
}
return hasNext;
}
}
};
return res;
}
</PRE>
Now, with this modified analyzer, the phrase query "blue sky" would find that document.
But note that this is yet not a perfect solution, because any phrase query "blue w1 w2 sky"
where both w1 and w2 are stop words would match that document.
</p>
<p>
Few more use cases for modifying position increments are:
<ol>
<li>Inhibiting phrase and proximity matches in sentence boundaries – for this, a tokenizer that
identifies a new sentence can add 1 to the position increment of the first token of the new sentence.</li>
<li>Injecting synonyms – here, synonyms of a token should be added after that token,
and their position increment should be set to 0.
As result, all synonyms of a token would be considered to appear in exactly the
same position as that token, and so would they be seen by phrase and proximity searches.</li>
</ol>
</p>
<h2>New TokenStream API</h2>
<p>
With Lucene 2.9 we introduce a new TokenStream API. The old API used to produce Tokens. A Token
has getter and setter methods for different properties like positionIncrement and termText.
While this approach was sufficient for the default indexing format, it is not versatile enough for
Flexible Indexing, a term which summarizes the effort of making the Lucene indexer pluggable and extensible for custom
index formats.
</p>
<p>
A fully customizable indexer means that users will be able to store custom data structures on disk. Therefore an API
is necessary that can transport custom types of data from the documents to the indexer.
</p>
<h3>Attribute and AttributeSource</h3>
Lucene 2.9 therefore introduces a new pair of classes called {@link org.apache.lucene.util.Attribute} and
{@link org.apache.lucene.util.AttributeSource}. An Attribute serves as a
particular piece of information about a text token. For example, {@link org.apache.lucene.analysis.tokenattributes.TermAttribute}
contains the term text of a token, and {@link org.apache.lucene.analysis.tokenattributes.OffsetAttribute} contains the start and end character offsets of a token.
An AttributeSource is a collection of Attributes with a restriction: there may be only one instance of each attribute type. TokenStream now extends AttributeSource, which
means that one can add Attributes to a TokenStream. Since TokenFilter extends TokenStream, all filters are also
AttributeSources.
<p>
Lucene now provides six Attributes out of the box, which replace the variables the Token class has:
<ul>
<li>{@link org.apache.lucene.analysis.tokenattributes.TermAttribute}<p>The term text of a token.</p></li>
<li>{@link org.apache.lucene.analysis.tokenattributes.OffsetAttribute}<p>The start and end offset of token in characters.</p></li>
<li>{@link org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute}<p>See above for detailed information about position increment.</p></li>
<li>{@link org.apache.lucene.analysis.tokenattributes.PayloadAttribute}<p>The payload that a Token can optionally have.</p></li>
<li>{@link org.apache.lucene.analysis.tokenattributes.TypeAttribute}<p>The type of the token. Default is 'word'.</p></li>
<li>{@link org.apache.lucene.analysis.tokenattributes.FlagsAttribute}<p>Optional flags a token can have.</p></li>
</ul>
</p>
<h3>Using the new TokenStream API</h3>
There are a few important things to know in order to use the new API efficiently which are summarized here. You may want
to walk through the example below first and come back to this section afterwards.
<ol><li>
Please keep in mind that an AttributeSource can only have one instance of a particular Attribute. Furthermore, if
a chain of a TokenStream and multiple TokenFilters is used, then all TokenFilters in that chain share the Attributes
with the TokenStream.
</li>
<br>
<li>
Attribute instances are reused for all tokens of a document. Thus, a TokenStream/-Filter needs to update
the appropriate Attribute(s) in incrementToken(). The consumer, commonly the Lucene indexer, consumes the data in the
Attributes and then calls incrementToken() again until it returns false, which indicates that the end of the stream
was reached. This means that in each call of incrementToken() a TokenStream/-Filter can safely overwrite the data in
the Attribute instances.
</li>
<br>
<li>
For performance reasons a TokenStream/-Filter should add/get Attributes during instantiation; i.e., create an attribute in the
constructor and store references to it in an instance variable. Using an instance variable instead of calling addAttribute()/getAttribute()
in incrementToken() will avoid attribute lookups for every token in the document.
</li>
<br>
<li>
All methods in AttributeSource are idempotent, which means calling them multiple times always yields the same
result. This is especially important to know for addAttribute(). The method takes the <b>type</b> (<code>Class</code>)
of an Attribute as an argument and returns an <b>instance</b>. If an Attribute of the same type was previously added, then
the already existing instance is returned, otherwise a new instance is created and returned. Therefore TokenStreams/-Filters
can safely call addAttribute() with the same Attribute type multiple times. Even consumers of TokenStreams should
normally call addAttribute() instead of getAttribute(), because it would not fail if the TokenStream does not have this
Attribute (getAttribute() would throw an IllegalArgumentException, if the Attribute is missing). More advanced code
could simply check with hasAttribute(), if a TokenStream has it, and may conditionally leave out processing for
extra performance.
</li></ol>
<h3>Example</h3>
In this example we will create a WhiteSpaceTokenizer and use a LengthFilter to suppress all words that only
have two or less characters. The LengthFilter is part of the Lucene core and its implementation will be explained
here to illustrate the usage of the new TokenStream API.<br>
Then we will develop a custom Attribute, a PartOfSpeechAttribute, and add another filter to the chain which
utilizes the new custom attribute, and call it PartOfSpeechTaggingFilter.
<h4>Whitespace tokenization</h4>
<pre>
public class MyAnalyzer extends Analyzer {
public TokenStream tokenStream(String fieldName, Reader reader) {
TokenStream stream = new WhitespaceTokenizer(reader);
return stream;
}
public static void main(String[] args) throws IOException {
// text to tokenize
final String text = "This is a demo of the new TokenStream API";
MyAnalyzer analyzer = new MyAnalyzer();
TokenStream stream = analyzer.tokenStream("field", new StringReader(text));
// get the TermAttribute from the TokenStream
TermAttribute termAtt = stream.addAttribute(TermAttribute.class);
stream.reset();
// print all tokens until stream is exhausted
while (stream.incrementToken()) {
System.out.println(termAtt.term());
}
stream.end();
stream.close();
}
}
</pre>
In this easy example a simple white space tokenization is performed. In main() a loop consumes the stream and
prints the term text of the tokens by accessing the TermAttribute that the WhitespaceTokenizer provides.
Here is the output:
<pre>
This
is
a
demo
of
the
new
TokenStream
API
</pre>
<h4>Adding a LengthFilter</h4>
We want to suppress all tokens that have 2 or less characters. We can do that easily by adding a LengthFilter
to the chain. Only the tokenStream() method in our analyzer needs to be changed:
<pre>
public TokenStream tokenStream(String fieldName, Reader reader) {
TokenStream stream = new WhitespaceTokenizer(reader);
stream = new LengthFilter(stream, 3, Integer.MAX_VALUE);
return stream;
}
</pre>
Note how now only words with 3 or more characters are contained in the output:
<pre>
This
demo
the
new
TokenStream
API
</pre>
Now let's take a look how the LengthFilter is implemented (it is part of Lucene's core):
<pre>
public final class LengthFilter extends TokenFilter {
final int min;
final int max;
private TermAttribute termAtt;
/**
* Build a filter that removes words that are too long or too
* short from the text.
*/
public LengthFilter(TokenStream in, int min, int max)
{
super(in);
this.min = min;
this.max = max;
termAtt = addAttribute(TermAttribute.class);
}
/**
* Returns the next input Token whose term() is the right len
*/
public final boolean incrementToken() throws IOException
{
assert termAtt != null;
// return the first non-stop word found
while (input.incrementToken()) {
int len = termAtt.termLength();
if (len >= min && len <= max) {
return true;
}
// note: else we ignore it but should we index each part of it?
}
// reached EOS -- return null
return false;
}
}
</pre>
The TermAttribute is added in the constructor and stored in the instance variable <code>termAtt</code>.
Remember that there can only be a single instance of TermAttribute in the chain, so in our example the
<code>addAttribute()</code> call in LengthFilter returns the TermAttribute that the WhitespaceTokenizer already added. The tokens
are retrieved from the input stream in the <code>incrementToken()</code> method. By looking at the term text
in the TermAttribute the length of the term can be determined and too short or too long tokens are skipped.
Note how <code>incrementToken()</code> can efficiently access the instance variable; no attribute lookup
is neccessary. The same is true for the consumer, which can simply use local references to the Attributes.
<h4>Adding a custom Attribute</h4>
Now we're going to implement our own custom Attribute for part-of-speech tagging and call it consequently
<code>PartOfSpeechAttribute</code>. First we need to define the interface of the new Attribute:
<pre>
public interface PartOfSpeechAttribute extends Attribute {
public static enum PartOfSpeech {
Noun, Verb, Adjective, Adverb, Pronoun, Preposition, Conjunction, Article, Unknown
}
public void setPartOfSpeech(PartOfSpeech pos);
public PartOfSpeech getPartOfSpeech();
}
</pre>
Now we also need to write the implementing class. The name of that class is important here: By default, Lucene
checks if there is a class with the name of the Attribute with the postfix 'Impl'. In this example, we would
consequently call the implementing class <code>PartOfSpeechAttributeImpl</code>. <br/>
This should be the usual behavior. However, there is also an expert-API that allows changing these naming conventions:
{@link org.apache.lucene.util.AttributeSource.AttributeFactory}. The factory accepts an Attribute interface as argument
and returns an actual instance. You can implement your own factory if you need to change the default behavior. <br/><br/>
Now here is the actual class that implements our new Attribute. Notice that the class has to extend
{@link org.apache.lucene.util.AttributeImpl}:
<pre>
public final class PartOfSpeechAttributeImpl extends AttributeImpl
implements PartOfSpeechAttribute{
private PartOfSpeech pos = PartOfSpeech.Unknown;
public void setPartOfSpeech(PartOfSpeech pos) {
this.pos = pos;
}
public PartOfSpeech getPartOfSpeech() {
return pos;
}
public void clear() {
pos = PartOfSpeech.Unknown;
}
public void copyTo(AttributeImpl target) {
((PartOfSpeechAttributeImpl) target).pos = pos;
}
public boolean equals(Object other) {
if (other == this) {
return true;
}
if (other instanceof PartOfSpeechAttributeImpl) {
return pos == ((PartOfSpeechAttributeImpl) other).pos;
}
return false;
}
public int hashCode() {
return pos.ordinal();
}
}
</pre>
This simple Attribute implementation has only a single variable that stores the part-of-speech of a token. It extends the
new <code>AttributeImpl</code> class and therefore implements its abstract methods <code>clear(), copyTo(), equals(), hashCode()</code>.
Now we need a TokenFilter that can set this new PartOfSpeechAttribute for each token. In this example we show a very naive filter
that tags every word with a leading upper-case letter as a 'Noun' and all other words as 'Unknown'.
<pre>
public static class PartOfSpeechTaggingFilter extends TokenFilter {
PartOfSpeechAttribute posAtt;
TermAttribute termAtt;
protected PartOfSpeechTaggingFilter(TokenStream input) {
super(input);
posAtt = addAttribute(PartOfSpeechAttribute.class);
termAtt = addAttribute(TermAttribute.class);
}
public boolean incrementToken() throws IOException {
if (!input.incrementToken()) {return false;}
posAtt.setPartOfSpeech(determinePOS(termAtt.termBuffer(), 0, termAtt.termLength()));
return true;
}
// determine the part of speech for the given term
protected PartOfSpeech determinePOS(char[] term, int offset, int length) {
// naive implementation that tags every uppercased word as noun
if (length > 0 && Character.isUpperCase(term[0])) {
return PartOfSpeech.Noun;
}
return PartOfSpeech.Unknown;
}
}
</pre>
Just like the LengthFilter, this new filter accesses the attributes it needs in the constructor and
stores references in instance variables. Notice how you only need to pass in the interface of the new
Attribute and instantiating the correct class is automatically taken care of.
Now we need to add the filter to the chain:
<pre>
public TokenStream tokenStream(String fieldName, Reader reader) {
TokenStream stream = new WhitespaceTokenizer(reader);
stream = new LengthFilter(stream, 3, Integer.MAX_VALUE);
stream = new PartOfSpeechTaggingFilter(stream);
return stream;
}
</pre>
Now let's look at the output:
<pre>
This
demo
the
new
TokenStream
API
</pre>
Apparently it hasn't changed, which shows that adding a custom attribute to a TokenStream/Filter chain does not
affect any existing consumers, simply because they don't know the new Attribute. Now let's change the consumer
to make use of the new PartOfSpeechAttribute and print it out:
<pre>
public static void main(String[] args) throws IOException {
// text to tokenize
final String text = "This is a demo of the new TokenStream API";
MyAnalyzer analyzer = new MyAnalyzer();
TokenStream stream = analyzer.tokenStream("field", new StringReader(text));
// get the TermAttribute from the TokenStream
TermAttribute termAtt = stream.addAttribute(TermAttribute.class);
// get the PartOfSpeechAttribute from the TokenStream
PartOfSpeechAttribute posAtt = stream.addAttribute(PartOfSpeechAttribute.class);
stream.reset();
// print all tokens until stream is exhausted
while (stream.incrementToken()) {
System.out.println(termAtt.term() + ": " + posAtt.getPartOfSpeech());
}
stream.end();
stream.close();
}
</pre>
The change that was made is to get the PartOfSpeechAttribute from the TokenStream and print out its contents in
the while loop that consumes the stream. Here is the new output:
<pre>
This: Noun
demo: Unknown
the: Unknown
new: Unknown
TokenStream: Noun
API: Noun
</pre>
Each word is now followed by its assigned PartOfSpeech tag. Of course this is a naive
part-of-speech tagging. The word 'This' should not even be tagged as noun; it is only spelled capitalized because it
is the first word of a sentence. Actually this is a good opportunity for an exercise. To practice the usage of the new
API the reader could now write an Attribute and TokenFilter that can specify for each word if it was the first token
of a sentence or not. Then the PartOfSpeechTaggingFilter can make use of this knowledge and only tag capitalized words
as nouns if not the first word of a sentence (we know, this is still not a correct behavior, but hey, it's a good exercise).
As a small hint, this is how the new Attribute class could begin:
<pre>
public class FirstTokenOfSentenceAttributeImpl extends AttributeImpl
implements FirstTokenOfSentenceAttribute {
private boolean firstToken;
public void setFirstToken(boolean firstToken) {
this.firstToken = firstToken;
}
public boolean getFirstToken() {
return firstToken;
}
public void clear() {
firstToken = false;
}
...
</pre>
</body>
</html>
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/package.html | HTML | art | 31,040 |
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.Reader;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.AttributeSource;
/**
* Emits the entire input as a single token.
*/
/**
 * Emits the entire input as a single token.
 */
public final class KeywordTokenizer extends Tokenizer {

  private static final int DEFAULT_BUFFER_SIZE = 256;

  // True once the single token has been emitted for the current input.
  private boolean done;
  // Offset just past the last character read; reported again by end().
  private int finalOffset;
  private TermAttribute termAtt;
  private OffsetAttribute offsetAtt;

  public KeywordTokenizer(Reader input) {
    this(input, DEFAULT_BUFFER_SIZE);
  }

  public KeywordTokenizer(Reader input, int bufferSize) {
    super(input);
    init(bufferSize);
  }

  public KeywordTokenizer(AttributeSource source, Reader input, int bufferSize) {
    super(source, input);
    init(bufferSize);
  }

  public KeywordTokenizer(AttributeFactory factory, Reader input, int bufferSize) {
    super(factory, input);
    init(bufferSize);
  }

  // Shared constructor tail: registers the attributes and pre-sizes the term buffer.
  private void init(int bufferSize) {
    done = false;
    termAtt = addAttribute(TermAttribute.class);
    offsetAtt = addAttribute(OffsetAttribute.class);
    termAtt.resizeTermBuffer(bufferSize);
  }

  /**
   * Produces exactly one token containing the whole input, then signals
   * end-of-stream on every subsequent call.
   */
  @Override
  public final boolean incrementToken() throws IOException {
    if (done) {
      return false;
    }
    done = true;
    clearAttributes();
    // Slurp the entire reader into the term buffer, growing it on demand.
    char[] buffer = termAtt.termBuffer();
    int filled = 0;
    for (;;) {
      final int count = input.read(buffer, filled, buffer.length - filled);
      if (count == -1) {
        break;
      }
      filled += count;
      if (filled == buffer.length) {
        buffer = termAtt.resizeTermBuffer(1 + buffer.length);
      }
    }
    termAtt.setTermLength(filled);
    finalOffset = correctOffset(filled);
    offsetAtt.setOffset(correctOffset(0), finalOffset);
    return true;
  }

  @Override
  public final void end() {
    // Expose the final offset to consumers after the stream is exhausted.
    offsetAtt.setOffset(finalOffset, finalOffset);
  }

  @Override
  public void reset(Reader input) throws IOException {
    super.reset(input);
    this.done = false;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/KeywordTokenizer.java | Java | art | 2,922 |
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
/** Transforms the token stream as per the Porter stemming algorithm.
Note: the input to the stemming filter must already be in lower case,
so you will need to use LowerCaseFilter or LowerCaseTokenizer farther
down the Tokenizer chain in order for this to work properly!
<P>
To use this filter with other analyzers, you'll want to write an
Analyzer class that sets up the TokenStream chain as you want it.
To use this with LowerCaseTokenizer, for example, you'd write an
analyzer like this:
<P>
<PRE>
class MyAnalyzer extends Analyzer {
public final TokenStream tokenStream(String fieldName, Reader reader) {
return new PorterStemFilter(new LowerCaseTokenizer(reader));
}
}
</PRE>
*/
public final class PorterStemFilter extends TokenFilter {
  // Both fields are assigned exactly once in the constructor; final documents that.
  private final PorterStemmer stemmer;
  private final TermAttribute termAtt;

  /**
   * Builds a stemming filter over the given stream.
   *
   * @param in token stream whose terms must already be lower-cased
   */
  public PorterStemFilter(TokenStream in) {
    super(in);
    stemmer = new PorterStemmer();
    termAtt = addAttribute(TermAttribute.class);
  }

  /**
   * Advances to the next input token and replaces its term text with
   * the Porter stem when the stemmer changed the word.
   *
   * @return false when the underlying stream is exhausted
   */
  @Override
  public final boolean incrementToken() throws IOException {
    if (!input.incrementToken()) {
      return false;
    }
    // stem() returns true only if the word was modified; only then do we
    // need to copy the stemmer's result buffer back into the attribute.
    if (stemmer.stem(termAtt.termBuffer(), 0, termAtt.termLength())) {
      termAtt.setTermBuffer(stemmer.getResultBuffer(), 0, stemmer.getResultLength());
    }
    return true;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/PorterStemFilter.java | Java | art | 2,270 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.analysis;
import java.io.IOException;
/**
* Subclasses of CharFilter can be chained to filter CharStream.
* They can be used as {@link java.io.Reader} with additional offset
* correction. {@link Tokenizer}s will automatically use {@link #correctOffset}
* if a CharFilter/CharStream subclass is used.
*
* @version $Id$
*
*/
public abstract class CharFilter extends CharStream {
// Wrapped stream; every read/mark/reset call below is delegated to it.
protected CharStream input;
protected CharFilter(CharStream in) {
input = in;
}
/**
* Subclass may want to override to correct the current offset.
*
* @param currentOff current offset
* @return corrected offset
*/
protected int correct(int currentOff) {
return currentOff;
}
/**
* Chains the corrected offset through the input
* CharFilter.
*/
@Override
public final int correctOffset(int currentOff) {
// Apply this filter's own correction first, then let the wrapped stream
// (possibly another CharFilter) apply its correction on top.
return input.correctOffset(correct(currentOff));
}
// The remaining methods simply forward to the wrapped stream unchanged.
@Override
public void close() throws IOException {
input.close();
}
@Override
public int read(char[] cbuf, int off, int len) throws IOException {
return input.read(cbuf, off, len);
}
@Override
public boolean markSupported(){
return input.markSupported();
}
@Override
public void mark( int readAheadLimit ) throws IOException {
input.mark(readAheadLimit);
}
@Override
public void reset() throws IOException {
input.reset();
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/CharFilter.java | Java | art | 2,213 |
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.Reader;
import java.io.IOException;
/** An Analyzer that uses {@link WhitespaceTokenizer}. */
public final class WhitespaceAnalyzer extends Analyzer {

  @Override
  public TokenStream tokenStream(String fieldName, Reader reader) {
    return new WhitespaceTokenizer(reader);
  }

  /**
   * Returns a per-thread reusable tokenizer: the cached instance is reset
   * onto the new reader when present, otherwise a fresh one is created
   * and cached for subsequent calls.
   */
  @Override
  public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException {
    Tokenizer previous = (Tokenizer) getPreviousTokenStream();
    if (previous != null) {
      previous.reset(reader);
      return previous;
    }
    Tokenizer fresh = new WhitespaceTokenizer(reader);
    setPreviousTokenStream(fresh);
    return fresh;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/WhitespaceAnalyzer.java | Java | art | 1,500 |
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.document.NumericField; // for javadocs
import org.apache.lucene.search.NumericRangeQuery; // for javadocs
import org.apache.lucene.search.NumericRangeFilter; // for javadocs
import org.apache.lucene.search.SortField; // for javadocs
import org.apache.lucene.search.FieldCache; // javadocs
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
/**
* <b>Expert:</b> This class provides a {@link TokenStream}
* for indexing numeric values that can be used by {@link
* NumericRangeQuery} or {@link NumericRangeFilter}.
*
* <p>Note that for simple usage, {@link NumericField} is
* recommended. {@link NumericField} disables norms and
* term freqs, as they are not usually needed during
* searching. If you need to change these settings, you
* should use this class.
*
* <p>See {@link NumericField} for capabilities of fields
* indexed numerically.</p>
*
* <p>Here's an example usage, for an <code>int</code> field:
*
* <pre>
* Field field = new Field(name, new NumericTokenStream(precisionStep).setIntValue(value));
* field.setOmitNorms(true);
* field.setOmitTermFreqAndPositions(true);
* document.add(field);
* </pre>
*
* <p>For optimal performance, re-use the TokenStream and Field instance
* for more than one document:
*
* <pre>
* NumericTokenStream stream = new NumericTokenStream(precisionStep);
* Field field = new Field(name, stream);
* field.setOmitNorms(true);
* field.setOmitTermFreqAndPositions(true);
* Document document = new Document();
* document.add(field);
*
* for(all documents) {
* stream.setIntValue(value)
* writer.addDocument(document);
* }
* </pre>
*
* <p>This stream is not intended to be used in analyzers;
* it's more for iterating the different precisions during
* indexing a specific numeric value.</p>
* <p><b>NOTE</b>: as token streams are only consumed once
* the document is added to the index, if you index more
* than one numeric field, use a separate <code>NumericTokenStream</code>
* instance for each.</p>
*
* <p>See {@link NumericRangeQuery} for more details on the
* <a
* href="../search/NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
* parameter as well as how numeric fields work under the hood.</p>
*
* <p><font color="red"><b>NOTE:</b> This API is experimental and
* might change in incompatible ways in the next release.</font>
*
* @since 2.9
*/
public final class NumericTokenStream extends TokenStream {

  /** The full precision token gets this token type assigned. */
  public static final String TOKEN_TYPE_FULL_PREC = "fullPrecNumeric";

  /** The lower precision tokens gets this token type assigned. */
  public static final String TOKEN_TYPE_LOWER_PREC = "lowerPrecNumeric";

  /**
   * Creates a token stream for numeric values using the default <code>precisionStep</code>
   * {@link NumericUtils#PRECISION_STEP_DEFAULT} (4). The stream is not yet initialized;
   * before using it, set a value using the various set<em>???</em>Value() methods.
   */
  public NumericTokenStream() {
    this(NumericUtils.PRECISION_STEP_DEFAULT);
  }

  /**
   * Creates a token stream for numeric values with the specified
   * <code>precisionStep</code>. The stream is not yet initialized;
   * before using it, set a value using the various set<em>???</em>Value() methods.
   * @throws IllegalArgumentException if <code>precisionStep</code> is less than 1
   */
  public NumericTokenStream(final int precisionStep) {
    super();
    this.precisionStep = checkPrecisionStep(precisionStep);
  }

  /**
   * Expert: Creates a token stream for numeric values with the specified
   * <code>precisionStep</code> using the given {@link AttributeSource}.
   * The stream is not yet initialized;
   * before using it, set a value using the various set<em>???</em>Value() methods.
   * @throws IllegalArgumentException if <code>precisionStep</code> is less than 1
   */
  public NumericTokenStream(AttributeSource source, final int precisionStep) {
    super(source);
    this.precisionStep = checkPrecisionStep(precisionStep);
  }

  /**
   * Expert: Creates a token stream for numeric values with the specified
   * <code>precisionStep</code> using the given
   * {@link org.apache.lucene.util.AttributeSource.AttributeFactory}.
   * The stream is not yet initialized;
   * before using it, set a value using the various set<em>???</em>Value() methods.
   * @throws IllegalArgumentException if <code>precisionStep</code> is less than 1
   */
  public NumericTokenStream(AttributeFactory factory, final int precisionStep) {
    super(factory);
    this.precisionStep = checkPrecisionStep(precisionStep);
  }

  /**
   * Shared constructor validation (previously duplicated in three
   * constructors, and performed after the field assignment). Rejects
   * precision steps below 1 before any state is recorded.
   * @return the validated precision step, unchanged
   */
  private static int checkPrecisionStep(final int precisionStep) {
    if (precisionStep < 1)
      throw new IllegalArgumentException("precisionStep must be >=1");
    return precisionStep;
  }

  /**
   * Initializes the token stream with the supplied <code>long</code> value.
   * @param value the value, for which this TokenStream should enumerate tokens.
   * @return this instance, because of this you can use it the following way:
   * <code>new Field(name, new NumericTokenStream(precisionStep).setLongValue(value))</code>
   */
  public NumericTokenStream setLongValue(final long value) {
    this.value = value;
    valSize = 64;
    shift = 0;
    return this;
  }

  /**
   * Initializes the token stream with the supplied <code>int</code> value.
   * @param value the value, for which this TokenStream should enumerate tokens.
   * @return this instance, because of this you can use it the following way:
   * <code>new Field(name, new NumericTokenStream(precisionStep).setIntValue(value))</code>
   */
  public NumericTokenStream setIntValue(final int value) {
    this.value = (long) value;
    valSize = 32;
    shift = 0;
    return this;
  }

  /**
   * Initializes the token stream with the supplied <code>double</code> value.
   * The double is remapped to a sortable long so prefix coding works.
   * @param value the value, for which this TokenStream should enumerate tokens.
   * @return this instance, because of this you can use it the following way:
   * <code>new Field(name, new NumericTokenStream(precisionStep).setDoubleValue(value))</code>
   */
  public NumericTokenStream setDoubleValue(final double value) {
    this.value = NumericUtils.doubleToSortableLong(value);
    valSize = 64;
    shift = 0;
    return this;
  }

  /**
   * Initializes the token stream with the supplied <code>float</code> value.
   * The float is remapped to a sortable int so prefix coding works.
   * @param value the value, for which this TokenStream should enumerate tokens.
   * @return this instance, because of this you can use it the following way:
   * <code>new Field(name, new NumericTokenStream(precisionStep).setFloatValue(value))</code>
   */
  public NumericTokenStream setFloatValue(final float value) {
    this.value = (long) NumericUtils.floatToSortableInt(value);
    valSize = 32;
    shift = 0;
    return this;
  }

  /** Restarts token enumeration from the full-precision term.
   * @throws IllegalStateException if no value was set yet */
  @Override
  public void reset() {
    if (valSize == 0)
      throw new IllegalStateException("call set???Value() before usage");
    shift = 0;
  }

  /**
   * Emits one prefix-coded term per call, starting at shift 0 (full
   * precision) and advancing by <code>precisionStep</code> until the value's
   * bit width is exhausted.
   * @throws IllegalStateException if no value was set yet
   */
  @Override
  public boolean incrementToken() {
    if (valSize == 0)
      throw new IllegalStateException("call set???Value() before usage");
    if (shift >= valSize)
      return false;  // all precisions emitted for the current value
    clearAttributes();
    final char[] buffer;
    switch (valSize) {
      case 64:
        buffer = termAtt.resizeTermBuffer(NumericUtils.BUF_SIZE_LONG);
        termAtt.setTermLength(NumericUtils.longToPrefixCoded(value, shift, buffer));
        break;
      case 32:
        buffer = termAtt.resizeTermBuffer(NumericUtils.BUF_SIZE_INT);
        termAtt.setTermLength(NumericUtils.intToPrefixCoded((int) value, shift, buffer));
        break;
      default:
        // should not happen: valSize is only ever set to 32 or 64 above
        throw new IllegalArgumentException("valSize must be 32 or 64");
    }
    // Only the full-precision term advances the position; lower-precision
    // terms are stacked at the same position (increment 0).
    typeAtt.setType((shift == 0) ? TOKEN_TYPE_FULL_PREC : TOKEN_TYPE_LOWER_PREC);
    posIncrAtt.setPositionIncrement((shift == 0) ? 1 : 0);
    shift += precisionStep;
    return true;
  }

  @Override
  public String toString() {
    final StringBuilder sb = new StringBuilder("(numeric,valSize=").append(valSize);
    sb.append(",precisionStep=").append(precisionStep).append(')');
    return sb.toString();
  }

  // members
  private final TermAttribute termAtt = addAttribute(TermAttribute.class);
  private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
  private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);

  private int shift = 0, valSize = 0; // valSize==0 means not initialized
  private final int precisionStep;
  private long value = 0L;
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/NumericTokenStream.java | Java | art | 9,508 |
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File;
import java.io.IOException;
import java.io.Reader;
import java.util.Arrays;
import java.util.Set;
import java.util.List;
import org.apache.lucene.util.Version;
/** Filters {@link LetterTokenizer} with {@link LowerCaseFilter} and {@link StopFilter}.
*
* <a name="version"/>
* <p>You must specify the required {@link Version}
* compatibility when creating StopAnalyzer:
* <ul>
* <li> As of 2.9, position increments are preserved
* </ul>
*/
public final class StopAnalyzer extends Analyzer {

  private final Set<?> stopWords;
  private final boolean enablePositionIncrements;

  /** An unmodifiable set containing some common English words that are not usually useful
   for searching.*/
  public static final Set<?> ENGLISH_STOP_WORDS_SET;

  static {
    final List<String> stopWords = Arrays.asList(
      "a", "an", "and", "are", "as", "at", "be", "but", "by",
      "for", "if", "in", "into", "is", "it",
      "no", "not", "of", "on", "or", "such",
      "that", "the", "their", "then", "there", "these",
      "they", "this", "to", "was", "will", "with"
    );
    final CharArraySet stopSet = new CharArraySet(stopWords.size(), false);
    stopSet.addAll(stopWords);
    ENGLISH_STOP_WORDS_SET = CharArraySet.unmodifiableSet(stopSet);
  }

  /** Builds an analyzer which removes words in
   * {@link #ENGLISH_STOP_WORDS_SET}.
   * @param matchVersion See <a href="#version">above</a>
   */
  public StopAnalyzer(Version matchVersion) {
    this.stopWords = ENGLISH_STOP_WORDS_SET;
    this.enablePositionIncrements = StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion);
  }

  /** Builds an analyzer with the stop words from the given set.
   * @param matchVersion See <a href="#version">above</a>
   * @param stopWords Set of stop words */
  public StopAnalyzer(Version matchVersion, Set<?> stopWords) {
    this.stopWords = stopWords;
    this.enablePositionIncrements = StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion);
  }

  /** Builds an analyzer with the stop words from the given file.
   * @see WordlistLoader#getWordSet(File)
   * @param matchVersion See <a href="#version">above</a>
   * @param stopwordsFile File to load stop words from */
  public StopAnalyzer(Version matchVersion, File stopwordsFile) throws IOException {
    this.stopWords = WordlistLoader.getWordSet(stopwordsFile);
    this.enablePositionIncrements = StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion);
  }

  /** Builds an analyzer with the stop words from the given reader.
   * @see WordlistLoader#getWordSet(Reader)
   * @param matchVersion See <a href="#version">above</a>
   * @param stopwords Reader to load stop words from */
  public StopAnalyzer(Version matchVersion, Reader stopwords) throws IOException {
    this.stopWords = WordlistLoader.getWordSet(stopwords);
    this.enablePositionIncrements = StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion);
  }

  /** Filters LowerCaseTokenizer with StopFilter. */
  @Override
  public TokenStream tokenStream(String fieldName, Reader reader) {
    return new StopFilter(enablePositionIncrements, new LowerCaseTokenizer(reader), stopWords);
  }

  /** Per-thread holder for the reusable tokenizer/filter chain.
   * Declared static so instances do not retain a hidden reference to the
   * enclosing analyzer. */
  private static class SavedStreams {
    Tokenizer source;
    TokenStream result;
  }

  /** Reusable variant of {@link #tokenStream}: caches the LowerCaseTokenizer +
   * StopFilter chain per thread and resets the tokenizer onto the new reader. */
  @Override
  public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException {
    SavedStreams streams = (SavedStreams) getPreviousTokenStream();
    if (streams == null) {
      streams = new SavedStreams();
      streams.source = new LowerCaseTokenizer(reader);
      streams.result = new StopFilter(enablePositionIncrements, streams.source, stopWords);
      setPreviousTokenStream(streams);
    } else {
      streams.source.reset(reader);
    }
    return streams.result;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/StopAnalyzer.java | Java | art | 4,656 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.analysis;
import java.io.IOException;
import java.io.Reader;
/**
* CharReader is a Reader wrapper. It reads chars from
* Reader and outputs {@link CharStream}, defining an
* identify function {@link #correctOffset} method that
* simply returns the provided offset.
*/
public final class CharReader extends CharStream {

  protected Reader input;

  /**
   * Wraps {@code input} as a {@link CharStream}. If it already is one it is
   * returned unchanged; otherwise it is wrapped in a new CharReader whose
   * offset correction is the identity.
   */
  public static CharStream get(Reader input) {
    if (input instanceof CharStream) {
      return (CharStream) input;
    }
    return new CharReader(input);
  }

  private CharReader(Reader in) {
    input = in;
  }

  /** Identity mapping: output offsets equal input offsets. */
  @Override
  public int correctOffset(int currentOff) {
    return currentOff;
  }

  @Override
  public void close() throws IOException {
    input.close();
  }

  @Override
  public int read(char[] cbuf, int off, int len) throws IOException {
    return input.read(cbuf, off, len);
  }

  @Override
  public boolean markSupported() {
    return input.markSupported();
  }

  @Override
  public void mark(int readAheadLimit) throws IOException {
    input.mark(readAheadLimit);
  }

  @Override
  public void reset() throws IOException {
    input.reset();
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/CharReader.java | Java | art | 1,949 |
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
/**
* Normalizes token text to lower case.
*/
public final class LowerCaseFilter extends TokenFilter {

  // final: the attribute instance is fixed at construction and never reassigned.
  private final TermAttribute termAtt;

  public LowerCaseFilter(TokenStream in) {
    super(in);
    termAtt = addAttribute(TermAttribute.class);
  }

  /**
   * Lowercases the current token's term text in place.
   * @return true while the wrapped stream produces tokens
   */
  @Override
  public final boolean incrementToken() throws IOException {
    if (!input.incrementToken()) {
      return false;
    }
    final char[] buffer = termAtt.termBuffer();
    final int length = termAtt.termLength();
    for (int i = 0; i < length; i++) {
      // Per-char lowercasing: supplementary (surrogate-pair) code points are
      // not treated as single code points by Character.toLowerCase(char).
      buffer[i] = Character.toLowerCase(buffer[i]);
    }
    return true;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/LowerCaseFilter.java | Java | art | 1,537 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.analysis;
import java.io.Reader;
/**
* CharStream adds {@link #correctOffset}
* functionality over {@link Reader}. All Tokenizers accept a
* CharStream instead of {@link Reader} as input, which enables
* arbitrary character based filtering before tokenization.
* The {@link #correctOffset} method fixed offsets to account for
* removal or insertion of characters, so that the offsets
* reported in the tokens match the character offsets of the
* original Reader.
*/
public abstract class CharStream extends Reader {
/**
 * Called by CharFilter(s) and Tokenizer to correct token offset.
 *
 * @param currentOff offset as seen in the output
 * @return corrected offset based on the input
 */
// NOTE: concrete subclasses must also implement Reader's abstract
// read(char[], int, int) and close() methods.
public abstract int correctOffset(int currentOff);
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/CharStream.java | Java | art | 1,601 |
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.index.Payload;
import org.apache.lucene.index.TermPositions; // for javadoc
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.Attribute;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.AttributeImpl;
/**
A Token is an occurrence of a term from the text of a field. It consists of
a term's text, the start and end offset of the term in the text of the field,
and a type string.
<p>
The start and end offsets permit applications to re-associate a token with
its source text, e.g., to display highlighted query terms in a document
browser, or to show matching text fragments in a <abbr title="KeyWord In Context">KWIC</abbr>
display, etc.
<p>
The type is a string, assigned by a lexical analyzer
(a.k.a. tokenizer), naming the lexical or syntactic class that the token
belongs to. For example an end of sentence marker token might be implemented
with type "eos". The default token type is "word".
<p>
A Token can optionally have metadata (a.k.a. Payload) in the form of a variable
length byte array. Use {@link TermPositions#getPayloadLength()} and
{@link TermPositions#getPayload(byte[], int)} to retrieve the payloads from the index.
<br><br>
<p><b>NOTE:</b> As of 2.9, Token implements all {@link Attribute} interfaces
that are part of core Lucene and can be found in the {@code tokenattributes} subpackage.
Even though it is not necessary to use Token anymore, with the new TokenStream API it can
be used as convenience class that implements all {@link Attribute}s, which is especially useful
to easily switch from the old to the new TokenStream API.
<br><br>
<p>Tokenizers and TokenFilters should try to re-use a Token
instance when possible for best performance, by
implementing the {@link TokenStream#incrementToken()} API.
Failing that, to create a new Token you should first use
one of the constructors that starts with null text. To load
the token from a char[] use {@link #setTermBuffer(char[], int, int)}.
To load from a String use {@link #setTermBuffer(String)} or {@link #setTermBuffer(String, int, int)}.
Alternatively you can get the Token's termBuffer by calling either {@link #termBuffer()},
if you know that your text is shorter than the capacity of the termBuffer
or {@link #resizeTermBuffer(int)}, if there is any possibility
that you may need to grow the buffer. Fill in the characters of your term into this
buffer, with {@link String#getChars(int, int, char[], int)} if loading from a string,
or with {@link System#arraycopy(Object, int, Object, int, int)}, and finally call {@link #setTermLength(int)} to
set the length of the term text. See <a target="_top"
href="https://issues.apache.org/jira/browse/LUCENE-969">LUCENE-969</a>
for details.</p>
<p>Typical Token reuse patterns:
<ul>
<li> Copying text from a string (type is reset to {@link #DEFAULT_TYPE} if not specified):<br/>
<pre>
return reusableToken.reinit(string, startOffset, endOffset[, type]);
</pre>
</li>
<li> Copying some text from a string (type is reset to {@link #DEFAULT_TYPE} if not specified):<br/>
<pre>
return reusableToken.reinit(string, 0, string.length(), startOffset, endOffset[, type]);
</pre>
</li>
</li>
<li> Copying text from char[] buffer (type is reset to {@link #DEFAULT_TYPE} if not specified):<br/>
<pre>
return reusableToken.reinit(buffer, 0, buffer.length, startOffset, endOffset[, type]);
</pre>
</li>
<li> Copying some text from a char[] buffer (type is reset to {@link #DEFAULT_TYPE} if not specified):<br/>
<pre>
return reusableToken.reinit(buffer, start, end - start, startOffset, endOffset[, type]);
</pre>
</li>
<li> Copying from one one Token to another (type is reset to {@link #DEFAULT_TYPE} if not specified):<br/>
<pre>
return reusableToken.reinit(source.termBuffer(), 0, source.termLength(), source.startOffset(), source.endOffset()[, source.type()]);
</pre>
</li>
</ul>
A few things to note:
<ul>
<li>clear() initializes all of the fields to default values. This was changed in contrast to Lucene 2.4, but should affect no one.</li>
<li>Because <code>TokenStreams</code> can be chained, one cannot assume that the <code>Token's</code> current type is correct.</li>
<li>The startOffset and endOffset represent the start and offset in the source text, so be careful in adjusting them.</li>
<li>When caching a reusable token, clone it. When injecting a cached token into a stream that can be reset, clone it again.</li>
</ul>
</p>
@see org.apache.lucene.index.Payload
*/
public class Token extends AttributeImpl
implements Cloneable, TermAttribute, TypeAttribute, PositionIncrementAttribute,
FlagsAttribute, OffsetAttribute, PayloadAttribute {
/** Default token type ("word"); used when no lexical class was assigned. */
public static final String DEFAULT_TYPE = "word";
// Constant: the minimum capacity ever allocated for termBuffer. Made final —
// it is a fixed lower bound and is only read within this class.
private static final int MIN_BUFFER_SIZE = 10;
private char[] termBuffer;          // lazily allocated term text (see initTermBuffer)
private int termLength;             // number of valid chars in termBuffer
private int startOffset,endOffset;  // character offsets into the source text
private String type = DEFAULT_TYPE; // lexical type, see TypeAttribute
private int flags;                  // experimental bit flags, see FlagsAttribute
private Payload payload;            // optional per-token metadata
private int positionIncrement = 1;  // distance from prior token, default 1
/** Constructs a Token with null text and default offsets, type, and flags. */
public Token() {
}
/** Constructs a Token with null text, recording only source offsets.
 * @param start start offset in the source text
 * @param end end offset in the source text */
public Token(int start, int end) {
  this.startOffset = start;
  this.endOffset = end;
}
/** Constructs a Token with null text, source offsets, and a lexical type.
 * @param start start offset in the source text
 * @param end end offset in the source text
 * @param typ the lexical type of this Token */
public Token(int start, int end, String typ) {
  this.startOffset = start;
  this.endOffset = end;
  this.type = typ;
}
/**
 * Constructs a Token with null text, source offsets, and flag bits.
 * NOTE: flags is EXPERIMENTAL.
 * @param start start offset in the source text
 * @param end end offset in the source text
 * @param flags The bits to set for this token
 */
public Token(int start, int end, int flags) {
  this.startOffset = start;
  this.endOffset = end;
  this.flags = flags;
}
/** Constructs a Token with the given term text and source offsets; the type
 * defaults to "word".
 * <b>NOTE:</b> for better indexing speed you should
 * instead use the char[] termBuffer methods to set the term text.
 * @param text term text
 * @param start start offset
 * @param end end offset
 */
public Token(String text, int start, int end) {
  setTermBuffer(text);
  this.startOffset = start;
  this.endOffset = end;
}
/** Constructs a Token with the given text, source offsets, and lexical type.
 * <b>NOTE:</b> for better indexing speed you should instead use the char[]
 * termBuffer methods to set the term text.
 * @param text term text
 * @param start start offset
 * @param end end offset
 * @param typ token type
 */
public Token(String text, int start, int end, String typ) {
  setTermBuffer(text);
  this.startOffset = start;
  this.endOffset = end;
  this.type = typ;
}
/**
 * Constructs a Token with the given text, source offsets, and flag bits.
 * <b>NOTE:</b> for better indexing speed you should instead use the char[]
 * termBuffer methods to set the term text.
 * @param text term text
 * @param start start offset
 * @param end end offset
 * @param flags token type bits
 */
public Token(String text, int start, int end, int flags) {
  setTermBuffer(text);
  this.startOffset = start;
  this.endOffset = end;
  this.flags = flags;
}
/**
 * Constructs a Token whose term text is copied from a slice of the given
 * char buffer, with the given source offsets.
 * @param startTermBuffer buffer holding the term text
 * @param termBufferOffset first char of the term within the buffer
 * @param termBufferLength number of chars in the term
 * @param start start offset in the source text
 * @param end end offset in the source text
 */
public Token(char[] startTermBuffer, int termBufferOffset, int termBufferLength, int start, int end) {
  setTermBuffer(startTermBuffer, termBufferOffset, termBufferLength);
  this.startOffset = start;
  this.endOffset = end;
}
/** Set the position increment. This determines the position of this token
 * relative to the previous Token in a {@link TokenStream}, used in phrase
 * searching. The default value is one.
 *
 * <p>Common uses:<ul>
 * <li>Zero puts multiple terms at the same position (e.g. several stems of
 * one word, so phrases match either stem); only the first of such a run
 * should keep increment one.
 * <li>Values greater than one inhibit exact phrase matches across removed
 * tokens, e.g. a stop filter can add the number of removed stop words to the
 * increment of the following token.
 * </ul>
 * @param positionIncrement the distance from the prior term; must be &gt;= 0
 * @throws IllegalArgumentException if the increment is negative
 * @see org.apache.lucene.index.TermPositions
 */
public void setPositionIncrement(int positionIncrement) {
  if (positionIncrement < 0) {
    throw new IllegalArgumentException
      ("Increment must be zero or greater: " + positionIncrement);
  }
  this.positionIncrement = positionIncrement;
}
/** Returns the position increment of this Token.
 * @return the distance from the prior term (default 1)
 * @see #setPositionIncrement
 */
public int getPositionIncrement() {
return positionIncrement;
}
/** Returns the Token's term text as a freshly allocated String.
 *
 * This method has a performance penalty
 * because the text is stored internally in a char[]. If
 * possible, use {@link #termBuffer()} and {@link
 * #termLength()} directly instead. If you really need a
 * String, use this method, which is nothing more than
 * a convenience call to <b>new String(token.termBuffer(), 0, token.termLength())</b>
 */
public final String term() {
// initTermBuffer() guarantees termBuffer is non-null even if no text was set.
initTermBuffer();
return new String(termBuffer, 0, termLength);
}
/** Replaces the term text with {@code length} chars copied out of
 * {@code buffer}, starting at {@code offset}.
 * @param buffer the buffer to copy
 * @param offset the index in the buffer of the first character to copy
 * @param length the number of characters to copy
 */
public final void setTermBuffer(char[] buffer, int offset, int length) {
  growTermBuffer(length);
  termLength = length;
  System.arraycopy(buffer, offset, termBuffer, 0, length);
}
/** Replaces the term text with the full contents of the given String.
 * @param buffer the buffer to copy
 */
public final void setTermBuffer(String buffer) {
  final int newLength = buffer.length();
  growTermBuffer(newLength);
  buffer.getChars(0, newLength, termBuffer, 0);
  termLength = newLength;
}
/** Replaces the term text with {@code length} chars copied out of the given
 * String, starting at {@code offset}.
 * @param buffer the buffer to copy
 * @param offset the index in the buffer of the first character to copy
 * @param length the number of characters to copy
 */
public final void setTermBuffer(String buffer, int offset, int length) {
  final int end = offset + length;
  assert offset <= buffer.length();
  assert end <= buffer.length();
  growTermBuffer(length);
  buffer.getChars(offset, end, termBuffer, 0);
  termLength = length;
}
/** Returns the internal termBuffer character array which
 * you can then directly alter. If the array is too
 * small for your token, use {@link
 * #resizeTermBuffer(int)} to increase it. After
 * altering the buffer be sure to call {@link
 * #setTermLength} to record the number of valid
 * characters that were placed into the termBuffer.
 * Note: this is the live internal array, not a copy. */
public final char[] termBuffer() {
initTermBuffer();
return termBuffer;
}
/** Grows the termBuffer to at least size newSize, preserving the
 * existing content. Note: If the next operation is to change
 * the contents of the term buffer use
 * {@link #setTermBuffer(char[], int, int)},
 * {@link #setTermBuffer(String)}, or
 * {@link #setTermBuffer(String, int, int)}
 * to optimally combine the resize with the setting of the termBuffer.
 * @param newSize minimum size of the new termBuffer
 * @return newly created termBuffer with length &gt;= newSize
 */
public char[] resizeTermBuffer(int newSize) {
  if (termBuffer == null) {
    // First allocation: never smaller than MIN_BUFFER_SIZE.
    termBuffer = new char[ArrayUtil.getNextSize(newSize < MIN_BUFFER_SIZE ? MIN_BUFFER_SIZE : newSize)];
    return termBuffer;
  }
  if (termBuffer.length >= newSize) {
    return termBuffer;  // already large enough
  }
  // Grow with slight over-allocation, preserving the existing content.
  final char[] grown = new char[ArrayUtil.getNextSize(newSize)];
  System.arraycopy(termBuffer, 0, grown, 0, termBuffer.length);
  termBuffer = grown;
  return termBuffer;
}
/** Allocates a buffer char[] of at least newSize, without preserving the existing content.
* its always used in places that set the content
* @param newSize minimum size of the buffer
*/
private void growTermBuffer(int newSize) {
if (termBuffer == null) {
// The buffer is always at least MIN_BUFFER_SIZE
termBuffer = new char[ArrayUtil.getNextSize(newSize < MIN_BUFFER_SIZE ? MIN_BUFFER_SIZE : newSize)];
} else {
if(termBuffer.length < newSize){
// Not big enough; create a new array with slight
// over allocation:
termBuffer = new char[ArrayUtil.getNextSize(newSize)];
}
}
}
  // Lazily allocates the termBuffer on first use (MIN_BUFFER_SIZE rounded up
  // by ArrayUtil.getNextSize) and resets termLength to match the empty term.
  private void initTermBuffer() {
    if (termBuffer == null) {
      termBuffer = new char[ArrayUtil.getNextSize(MIN_BUFFER_SIZE)];
      termLength = 0;
    }
  }
  /** Return number of valid characters (length of the term)
   * in the termBuffer array. */
  public final int termLength() {
    initTermBuffer();   // ensure the buffer exists before reporting a length
    return termLength;
  }
/** Set number of valid characters (length of the term) in
* the termBuffer array. Use this to truncate the termBuffer
* or to synchronize with external manipulation of the termBuffer.
* Note: to grow the size of the array,
* use {@link #resizeTermBuffer(int)} first.
* @param length the truncated length
*/
public final void setTermLength(int length) {
initTermBuffer();
if (length > termBuffer.length)
throw new IllegalArgumentException("length " + length + " exceeds the size of the termBuffer (" + termBuffer.length + ")");
termLength = length;
}
  /** Returns this Token's starting offset, the position of the first character
    corresponding to this token in the source text.
    Note that the difference between endOffset() and startOffset() may not be
    equal to {@link #termLength}, as the term text may have been altered by a
    stemmer or some other filter. */
  public final int startOffset() {
    return startOffset;
  }
  /** Set the starting offset.
    @see #startOffset() */
  public void setStartOffset(int offset) {
    this.startOffset = offset;
  }
  /** Returns this Token's ending offset, one greater than the position of the
    last character corresponding to this token in the source text. The length
    of the token in the source text is (endOffset - startOffset). */
  public final int endOffset() {
    return endOffset;
  }
  /** Set the ending offset.
    @see #endOffset() */
  public void setEndOffset(int offset) {
    this.endOffset = offset;
  }
  /** Set the starting and ending offset in a single call.
    @see #startOffset()
    @see #endOffset() */
  public void setOffset(int startOffset, int endOffset) {
    this.startOffset = startOffset;
    this.endOffset = endOffset;
  }
  /** Returns this Token's lexical type.  Defaults to "word". */
  public final String type() {
    return type;
  }
  /** Set the lexical type.
    @see #type() */
  public final void setType(String type) {
    this.type = type;
  }
  /**
   * EXPERIMENTAL: While we think this is here to stay, we may want to change it to be a long.
   * <p/>
   *
   * Get the bitset for any bits that have been set. This is completely distinct from {@link #type()}, although they do share similar purposes.
   * The flags can be used to encode information about the token for use by other {@link org.apache.lucene.analysis.TokenFilter}s.
   *
   * @return The bits
   */
  public int getFlags() {
    return flags;
  }
  /**
   * Set the flag bits wholesale (the previous value is replaced, not OR'd).
   * @see #getFlags()
   */
  public void setFlags(int flags) {
    this.flags = flags;
  }
  /**
   * Returns this Token's payload, or null if none was set.
   */
  public Payload getPayload() {
    return this.payload;
  }
  /**
   * Sets this Token's payload. The reference is stored as-is (not cloned);
   * may be null.
   */
  public void setPayload(Payload payload) {
    this.payload = payload;
  }
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append('(');
initTermBuffer();
if (termBuffer == null)
sb.append("null");
else
sb.append(termBuffer, 0, termLength);
sb.append(',').append(startOffset).append(',').append(endOffset);
if (!type.equals("word"))
sb.append(",type=").append(type);
if (positionIncrement != 1)
sb.append(",posIncr=").append(positionIncrement);
sb.append(')');
return sb.toString();
}
/** Resets the term text, payload, flags, and positionIncrement,
* startOffset, endOffset and token type to default.
*/
@Override
public void clear() {
payload = null;
// Leave termBuffer to allow re-use
termLength = 0;
positionIncrement = 1;
flags = 0;
startOffset = endOffset = 0;
type = DEFAULT_TYPE;
}
@Override
public Object clone() {
Token t = (Token)super.clone();
// Do a deep clone
if (termBuffer != null) {
t.termBuffer = (char[]) termBuffer.clone();
}
if (payload != null) {
t.payload = (Payload) payload.clone();
}
return t;
}
/** Makes a clone, but replaces the term buffer &
* start/end offset in the process. This is more
* efficient than doing a full clone (and then calling
* setTermBuffer) because it saves a wasted copy of the old
* termBuffer. */
public Token clone(char[] newTermBuffer, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset) {
final Token t = new Token(newTermBuffer, newTermOffset, newTermLength, newStartOffset, newEndOffset);
t.positionIncrement = positionIncrement;
t.flags = flags;
t.type = type;
if (payload != null)
t.payload = (Payload) payload.clone();
return t;
}
@Override
public boolean equals(Object obj) {
if (obj == this)
return true;
if (obj instanceof Token) {
Token other = (Token) obj;
initTermBuffer();
other.initTermBuffer();
if (termLength == other.termLength &&
startOffset == other.startOffset &&
endOffset == other.endOffset &&
flags == other.flags &&
positionIncrement == other.positionIncrement &&
subEqual(type, other.type) &&
subEqual(payload, other.payload)) {
for(int i=0;i<termLength;i++)
if (termBuffer[i] != other.termBuffer[i])
return false;
return true;
} else
return false;
} else
return false;
}
private boolean subEqual(Object o1, Object o2) {
if (o1 == null)
return o2 == null;
else
return o1.equals(o2);
}
  /** Hash code consistent with {@link #equals}: folds in term length and
   * content, offsets, flags, position increment, type and (if present)
   * payload. */
  @Override
  public int hashCode() {
    initTermBuffer();
    int code = termLength;
    code = code * 31 + startOffset;
    code = code * 31 + endOffset;
    code = code * 31 + flags;
    code = code * 31 + positionIncrement;
    code = code * 31 + type.hashCode();
    // a null payload contributes nothing to the hash
    code = (payload == null ? code : code * 31 + payload.hashCode());
    code = code * 31 + ArrayUtil.hashCode(termBuffer, 0, termLength);
    return code;
  }
// like clear() but doesn't clear termBuffer/text
private void clearNoTermBuffer() {
payload = null;
positionIncrement = 1;
flags = 0;
startOffset = endOffset = 0;
type = DEFAULT_TYPE;
}
/** Shorthand for calling {@link #clear},
* {@link #setTermBuffer(char[], int, int)},
* {@link #setStartOffset},
* {@link #setEndOffset},
* {@link #setType}
* @return this Token instance */
public Token reinit(char[] newTermBuffer, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset, String newType) {
clearNoTermBuffer();
payload = null;
positionIncrement = 1;
setTermBuffer(newTermBuffer, newTermOffset, newTermLength);
startOffset = newStartOffset;
endOffset = newEndOffset;
type = newType;
return this;
}
/** Shorthand for calling {@link #clear},
* {@link #setTermBuffer(char[], int, int)},
* {@link #setStartOffset},
* {@link #setEndOffset}
* {@link #setType} on Token.DEFAULT_TYPE
* @return this Token instance */
public Token reinit(char[] newTermBuffer, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset) {
clearNoTermBuffer();
setTermBuffer(newTermBuffer, newTermOffset, newTermLength);
startOffset = newStartOffset;
endOffset = newEndOffset;
type = DEFAULT_TYPE;
return this;
}
/** Shorthand for calling {@link #clear},
* {@link #setTermBuffer(String)},
* {@link #setStartOffset},
* {@link #setEndOffset}
* {@link #setType}
* @return this Token instance */
public Token reinit(String newTerm, int newStartOffset, int newEndOffset, String newType) {
clearNoTermBuffer();
setTermBuffer(newTerm);
startOffset = newStartOffset;
endOffset = newEndOffset;
type = newType;
return this;
}
/** Shorthand for calling {@link #clear},
* {@link #setTermBuffer(String, int, int)},
* {@link #setStartOffset},
* {@link #setEndOffset}
* {@link #setType}
* @return this Token instance */
public Token reinit(String newTerm, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset, String newType) {
clearNoTermBuffer();
setTermBuffer(newTerm, newTermOffset, newTermLength);
startOffset = newStartOffset;
endOffset = newEndOffset;
type = newType;
return this;
}
/** Shorthand for calling {@link #clear},
* {@link #setTermBuffer(String)},
* {@link #setStartOffset},
* {@link #setEndOffset}
* {@link #setType} on Token.DEFAULT_TYPE
* @return this Token instance */
public Token reinit(String newTerm, int newStartOffset, int newEndOffset) {
clearNoTermBuffer();
setTermBuffer(newTerm);
startOffset = newStartOffset;
endOffset = newEndOffset;
type = DEFAULT_TYPE;
return this;
}
/** Shorthand for calling {@link #clear},
* {@link #setTermBuffer(String, int, int)},
* {@link #setStartOffset},
* {@link #setEndOffset}
* {@link #setType} on Token.DEFAULT_TYPE
* @return this Token instance */
public Token reinit(String newTerm, int newTermOffset, int newTermLength, int newStartOffset, int newEndOffset) {
clearNoTermBuffer();
setTermBuffer(newTerm, newTermOffset, newTermLength);
startOffset = newStartOffset;
endOffset = newEndOffset;
type = DEFAULT_TYPE;
return this;
}
  /**
   * Copy the prototype token's fields into this one. Note: Payloads are shared
   * (the Payload reference is assigned, not cloned — both tokens then point at
   * the same Payload object).
   * @param prototype the token to copy state from
   */
  public void reinit(Token prototype) {
    prototype.initTermBuffer();   // make sure the source buffer exists
    setTermBuffer(prototype.termBuffer, 0, prototype.termLength);
    positionIncrement = prototype.positionIncrement;
    flags = prototype.flags;
    startOffset = prototype.startOffset;
    endOffset = prototype.endOffset;
    type = prototype.type;
    payload = prototype.payload;
  }
  /**
   * Copy the prototype token's fields into this one, with a different term.
   * Note: Payloads are shared (reference assignment, not a clone).
   * @param prototype the token to copy state from
   * @param newTerm the term text to use instead of the prototype's
   */
  public void reinit(Token prototype, String newTerm) {
    setTermBuffer(newTerm);
    positionIncrement = prototype.positionIncrement;
    flags = prototype.flags;
    startOffset = prototype.startOffset;
    endOffset = prototype.endOffset;
    type = prototype.type;
    payload = prototype.payload;
  }
  /**
   * Copy the prototype token's fields into this one, with a different term.
   * Note: Payloads are shared (reference assignment, not a clone).
   * @param prototype the token to copy state from
   * @param newTermBuffer buffer holding the replacement term text
   * @param offset index of the first character of the new term
   * @param length number of characters in the new term
   */
  public void reinit(Token prototype, char[] newTermBuffer, int offset, int length) {
    setTermBuffer(newTermBuffer, offset, length);
    positionIncrement = prototype.positionIncrement;
    flags = prototype.flags;
    startOffset = prototype.startOffset;
    endOffset = prototype.endOffset;
    type = prototype.type;
    payload = prototype.payload;
  }
  /** Copies this token's state into {@code target}. When the target is itself
   * a {@link Token}, {@link #reinit(Token)} copies everything in one shot and
   * the payload is then cloned (reinit only shares the reference). Otherwise
   * each standard attribute interface on the target is populated
   * individually. */
  @Override
  public void copyTo(AttributeImpl target) {
    if (target instanceof Token) {
      final Token to = (Token) target;
      to.reinit(this);
      // reinit shares the payload, so clone it:
      if (payload !=null) {
        to.payload = (Payload) payload.clone();
      }
    } else {
      initTermBuffer();
      ((TermAttribute) target).setTermBuffer(termBuffer, 0, termLength);
      ((OffsetAttribute) target).setOffset(startOffset, endOffset);
      ((PositionIncrementAttribute) target).setPositionIncrement(positionIncrement);
      // Payload is cloned here (unlike the Token-to-Token path above, where
      // reinit shares it first and the clone happens afterwards).
      ((PayloadAttribute) target).setPayload((payload == null) ? null : (Payload) payload.clone());
      ((FlagsAttribute) target).setFlags(flags);
      ((TypeAttribute) target).setType(type);
    }
  }
  /** Convenience factory that returns <code>Token</code> as implementation for the basic
   * attributes and returns the default impl (with "Impl" appended) for all other
   * attributes.
   * Shared singleton wrapping the default attribute factory.
   * @since 3.0
   */
  public static final AttributeSource.AttributeFactory TOKEN_ATTRIBUTE_FACTORY =
    new TokenAttributeFactory(AttributeSource.AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY);
/** <b>Expert:</b> Creates a TokenAttributeFactory returning {@link Token} as instance for the basic attributes
* and for all other attributes calls the given delegate factory.
* @since 3.0
*/
public static final class TokenAttributeFactory extends AttributeSource.AttributeFactory {
private final AttributeSource.AttributeFactory delegate;
/** <b>Expert</b>: Creates an AttributeFactory returning {@link Token} as instance for the basic attributes
* and for all other attributes calls the given delegate factory. */
public TokenAttributeFactory(AttributeSource.AttributeFactory delegate) {
this.delegate = delegate;
}
@Override
public AttributeImpl createAttributeInstance(Class<? extends Attribute> attClass) {
return attClass.isAssignableFrom(Token.class)
? new Token() : delegate.createAttributeInstance(attClass);
}
@Override
public boolean equals(Object other) {
if (this == other) return true;
if (other instanceof TokenAttributeFactory) {
final TokenAttributeFactory af = (TokenAttributeFactory) other;
return this.delegate.equals(af.delegate);
}
return false;
}
@Override
public int hashCode() {
return delegate.hashCode() ^ 0x0a45aa31;
}
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/Token.java | Java | art | 28,825 |
package org.apache.lucene.analysis.standard;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
/** Normalizes tokens extracted with {@link StandardTokenizer}:
 * strips trailing <tt>'s</tt> from apostrophe tokens and removes the dots
 * from acronym tokens. */
public final class StandardFilter extends TokenFilter {

  private static final String APOSTROPHE_TYPE = StandardTokenizerImpl.TOKEN_TYPES[StandardTokenizerImpl.APOSTROPHE];
  private static final String ACRONYM_TYPE = StandardTokenizerImpl.TOKEN_TYPES[StandardTokenizerImpl.ACRONYM];

  // This filter inspects the type attribute assigned by StandardTokenizer.
  private TypeAttribute typeAtt;
  private TermAttribute termAtt;

  /** Construct filtering <i>in</i>. */
  public StandardFilter(TokenStream in) {
    super(in);
    termAtt = addAttribute(TermAttribute.class);
    typeAtt = addAttribute(TypeAttribute.class);
  }

  /** Advances to the next token; returns false at end of stream.
   * <p>Removes <tt>'s</tt> from the end of words.
   * <p>Removes dots from acronyms.
   */
  @Override
  public final boolean incrementToken() throws java.io.IOException {
    if (!input.incrementToken()) {
      return false;
    }

    final char[] buffer = termAtt.termBuffer();
    final int length = termAtt.termLength();
    final String type = typeAtt.type();

    // NOTE: reference comparison (==) is deliberate — the type strings are
    // the shared constants out of StandardTokenizerImpl.TOKEN_TYPES.
    if (type == APOSTROPHE_TYPE
        && length >= 2
        && buffer[length - 2] == '\''
        && (buffer[length - 1] == 's' || buffer[length - 1] == 'S')) {
      // Drop the trailing 's (two characters).
      termAtt.setTermLength(length - 2);
    } else if (type == ACRONYM_TYPE) {
      // Compact the buffer in place, skipping every '.'.
      int kept = 0;
      for (int i = 0; i < length; i++) {
        final char c = buffer[i];
        if (c != '.') {
          buffer[kept++] = c;
        }
      }
      termAtt.setTermLength(kept);
    }

    return true;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/standard/StandardFilter.java | Java | art | 2,717 |
<!doctype html public "-//w3c//dtd html 4.0 transitional//en">
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
</head>
<body>
A fast grammar-based tokenizer constructed with JFlex.
</body>
</html>
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/standard/package.html | HTML | art | 1,015 |
package org.apache.lucene.analysis.standard;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.*;
import org.apache.lucene.util.Version;
import java.io.File;
import java.io.IOException;
import java.io.Reader;
import java.util.Set;
/**
 * Filters {@link StandardTokenizer} with {@link StandardFilter}, {@link
 * LowerCaseFilter} and {@link StopFilter}, using a list of
 * English stop words.
 *
 * <a name="version"/>
 * <p>You must specify the required {@link Version}
 * compatibility when creating StandardAnalyzer:
 * <ul>
 *   <li> As of 2.9, StopFilter preserves position
 *        increments
 *   <li> As of 2.4, Tokens incorrectly identified as acronyms
 *        are corrected (see <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a>)
 * </ul>
 */
public class StandardAnalyzer extends Analyzer {
  // Stop words removed by the final StopFilter stage; set once at construction.
  private Set<?> stopSet;
  /**
   * Whether deprecated acronyms should be replaced with HOST type
   * (see <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a>),
   * and whether StopFilter preserves position increments. Both flags are
   * derived from the matchVersion handed to the constructor.
   */
  private final boolean replaceInvalidAcronym,enableStopPositionIncrements;
  /** An unmodifiable set containing some common English words that are usually not
  useful for searching. */
  public static final Set<?> STOP_WORDS_SET = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
  // Lucene version this analyzer was created for; drives back-compat behavior.
  private final Version matchVersion;
  /** Builds an analyzer with the default stop words ({@link
   * #STOP_WORDS_SET}).
   * @param matchVersion Lucene version to match; see <a href="#version">above</a>
   */
  public StandardAnalyzer(Version matchVersion) {
    this(matchVersion, STOP_WORDS_SET);
  }
  /** Builds an analyzer with the given stop words.
   * @param matchVersion Lucene version to match; see <a href="#version">above</a>
   * @param stopWords stop words */
  public StandardAnalyzer(Version matchVersion, Set<?> stopWords) {
    stopSet = stopWords;
    setOverridesTokenStreamMethod(StandardAnalyzer.class);
    enableStopPositionIncrements = StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion);
    replaceInvalidAcronym = matchVersion.onOrAfter(Version.LUCENE_24);
    this.matchVersion = matchVersion;
  }
  /** Builds an analyzer with the stop words from the given file.
   * @see WordlistLoader#getWordSet(File)
   * @param matchVersion Lucene version to match; see <a href="#version">above</a>
   * @param stopwords File to read stop words from */
  public StandardAnalyzer(Version matchVersion, File stopwords) throws IOException {
    this(matchVersion, WordlistLoader.getWordSet(stopwords));
  }
  /** Builds an analyzer with the stop words from the given reader.
   * @see WordlistLoader#getWordSet(Reader)
   * @param matchVersion Lucene version to match; see <a href="#version">above</a>
   * @param stopwords Reader to read stop words from */
  public StandardAnalyzer(Version matchVersion, Reader stopwords) throws IOException {
    this(matchVersion, WordlistLoader.getWordSet(stopwords));
  }
  /** Constructs a {@link StandardTokenizer} filtered by a {@link
  StandardFilter}, a {@link LowerCaseFilter} and a {@link StopFilter}. */
  @Override
  public TokenStream tokenStream(String fieldName, Reader reader) {
    StandardTokenizer tokenStream = new StandardTokenizer(matchVersion, reader);
    tokenStream.setMaxTokenLength(maxTokenLength);
    TokenStream result = new StandardFilter(tokenStream);
    result = new LowerCaseFilter(result);
    result = new StopFilter(enableStopPositionIncrements, result, stopSet);
    return result;
  }
  // Per-thread holder for the tokenizer/filter chain cached by
  // reusableTokenStream() via set/getPreviousTokenStream().
  private static final class SavedStreams {
    StandardTokenizer tokenStream;
    TokenStream filteredTokenStream;
  }
  /** Default maximum allowed token length */
  public static final int DEFAULT_MAX_TOKEN_LENGTH = 255;
  private int maxTokenLength = DEFAULT_MAX_TOKEN_LENGTH;
  /**
   * Set maximum allowed token length. If a token is seen
   * that exceeds this length then it is discarded. This
   * setting only takes effect the next time tokenStream or
   * reusableTokenStream is called.
   */
  public void setMaxTokenLength(int length) {
    maxTokenLength = length;
  }
  /**
   * @see #setMaxTokenLength
   */
  public int getMaxTokenLength() {
    return maxTokenLength;
  }
  @Override
  public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException {
    if (overridesTokenStreamMethod) {
      // LUCENE-1678: force fallback to tokenStream() if we
      // have been subclassed and that subclass overrides
      // tokenStream but not reusableTokenStream
      return tokenStream(fieldName, reader);
    }
    SavedStreams streams = (SavedStreams) getPreviousTokenStream();
    if (streams == null) {
      // First use on this thread: build the chain and cache it.
      streams = new SavedStreams();
      setPreviousTokenStream(streams);
      streams.tokenStream = new StandardTokenizer(matchVersion, reader);
      streams.filteredTokenStream = new StandardFilter(streams.tokenStream);
      streams.filteredTokenStream = new LowerCaseFilter(streams.filteredTokenStream);
      streams.filteredTokenStream = new StopFilter(enableStopPositionIncrements,
                                                   streams.filteredTokenStream, stopSet);
    } else {
      // Subsequent use: just re-point the cached tokenizer at the new reader.
      streams.tokenStream.reset(reader);
    }
    // Re-apply per-call settings, which may have changed since the chain was built.
    streams.tokenStream.setMaxTokenLength(maxTokenLength);
    streams.tokenStream.setReplaceInvalidAcronym(replaceInvalidAcronym);
    return streams.filteredTokenStream;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java | Java | art | 6,215 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.analysis.standard;
import java.io.IOException;
import java.io.Reader;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.Version;
/** A grammar-based tokenizer constructed with JFlex
 *
 * <p> This should be a good tokenizer for most European-language documents:
 *
 * <ul>
 *   <li>Splits words at punctuation characters, removing punctuation. However, a
 *     dot that's not followed by whitespace is considered part of a token.
 *   <li>Splits words at hyphens, unless there's a number in the token, in which case
 *     the whole token is interpreted as a product number and is not split.
 *   <li>Recognizes email addresses and internet hostnames as one token.
 * </ul>
 *
 * <p>Many applications have specific tokenizer needs.  If this tokenizer does
 * not suit your application, please consider copying this source code
 * directory to your project and maintaining your own grammar-based tokenizer.
 *
 * <a name="version"/>
 * <p>You must specify the required {@link Version}
 * compatibility when creating StandardTokenizer:
 * <ul>
 *   <li> As of 2.4, Tokens incorrectly identified as acronyms
 *        are corrected (see <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a>)
 * </ul>
 */
public final class StandardTokenizer extends Tokenizer {
  /** A private instance of the JFlex-constructed scanner */
  private final StandardTokenizerImpl scanner;

  // Token type ids, used as indexes into TOKEN_TYPES.
  public static final int ALPHANUM = 0;
  public static final int APOSTROPHE = 1;
  public static final int ACRONYM = 2;
  public static final int COMPANY = 3;
  public static final int EMAIL = 4;
  public static final int HOST = 5;
  public static final int NUM = 6;
  public static final int CJ = 7;

  /**
   * @deprecated this solves a bug where HOSTs that end with '.' are identified
   *             as ACRONYMs.
   */
  public static final int ACRONYM_DEP = 8;

  /** String token types that correspond to token type int constants */
  public static final String [] TOKEN_TYPES = new String [] {
    "<ALPHANUM>",
    "<APOSTROPHE>",
    "<ACRONYM>",
    "<COMPANY>",
    "<EMAIL>",
    "<HOST>",
    "<NUM>",
    "<CJ>",
    "<ACRONYM_DEP>"
  };

  // When true, tokens the grammar flags as ACRONYM_DEP are reported as HOST
  // (with the trailing '.' removed) instead of ACRONYM. See LUCENE-1068.
  private boolean replaceInvalidAcronym;

  private int maxTokenLength = StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH;

  /** Set the max allowed token length.  Any token longer
   *  than this is skipped.
   *  @param length maximum token length, in characters */
  public void setMaxTokenLength(int length) {
    this.maxTokenLength = length;
  }

  /** @see #setMaxTokenLength */
  public int getMaxTokenLength() {
    return maxTokenLength;
  }

  /**
   * Creates a new instance of the {@link org.apache.lucene.analysis.standard.StandardTokenizer}.  Attaches
   * the <code>input</code> to the newly created JFlex scanner.
   *
   * @param matchVersion Lucene version to match; see the class javadoc
   * @param input The input reader
   *
   * See http://issues.apache.org/jira/browse/LUCENE-1068
   */
  public StandardTokenizer(Version matchVersion, Reader input) {
    super();
    this.scanner = new StandardTokenizerImpl(input);
    init(input, matchVersion);
  }

  /**
   * Creates a new StandardTokenizer with a given {@link AttributeSource}.
   * @param matchVersion Lucene version to match; see the class javadoc
   * @param source AttributeSource to share attributes with
   * @param input The input reader
   */
  public StandardTokenizer(Version matchVersion, AttributeSource source, Reader input) {
    super(source);
    this.scanner = new StandardTokenizerImpl(input);
    init(input, matchVersion);
  }

  /**
   * Creates a new StandardTokenizer with a given {@link org.apache.lucene.util.AttributeSource.AttributeFactory}
   * @param matchVersion Lucene version to match; see the class javadoc
   * @param factory attribute factory used to create the attribute instances
   * @param input The input reader
   */
  public StandardTokenizer(Version matchVersion, AttributeFactory factory, Reader input) {
    super(factory);
    this.scanner = new StandardTokenizerImpl(input);
    init(input, matchVersion);
  }

  // Shared constructor logic: derive the back-compat acronym flag from
  // matchVersion and register the attributes this tokenizer produces.
  private void init(Reader input, Version matchVersion) {
    if (matchVersion.onOrAfter(Version.LUCENE_24)) {
      replaceInvalidAcronym = true;
    } else {
      replaceInvalidAcronym = false;
    }
    this.input = input;
    termAtt = addAttribute(TermAttribute.class);
    offsetAtt = addAttribute(OffsetAttribute.class);
    posIncrAtt = addAttribute(PositionIncrementAttribute.class);
    typeAtt = addAttribute(TypeAttribute.class);
  }

  // this tokenizer generates four attributes:
  // term, offset, positionIncrement and type
  private TermAttribute termAtt;
  private OffsetAttribute offsetAtt;
  private PositionIncrementAttribute posIncrAtt;
  private TypeAttribute typeAtt;

  /*
   * (non-Javadoc)
   *
   * @see org.apache.lucene.analysis.TokenStream#next()
   */
  @Override
  public final boolean incrementToken() throws IOException {
    clearAttributes();
    int posIncr = 1;

    while(true) {
      int tokenType = scanner.getNextToken();

      if (tokenType == StandardTokenizerImpl.YYEOF) {
        return false;
      }

      if (scanner.yylength() <= maxTokenLength) {
        posIncrAtt.setPositionIncrement(posIncr);
        scanner.getText(termAtt);
        final int start = scanner.yychar();
        offsetAtt.setOffset(correctOffset(start), correctOffset(start+termAtt.termLength()));
        // This 'if' should be removed in the next release. For now, it converts
        // invalid acronyms to HOST. When removed, only the 'else' part should
        // remain.
        if (tokenType == StandardTokenizerImpl.ACRONYM_DEP) {
          if (replaceInvalidAcronym) {
            typeAtt.setType(StandardTokenizerImpl.TOKEN_TYPES[StandardTokenizerImpl.HOST]);
            termAtt.setTermLength(termAtt.termLength() - 1); // remove extra '.'
          } else {
            typeAtt.setType(StandardTokenizerImpl.TOKEN_TYPES[StandardTokenizerImpl.ACRONYM]);
          }
        } else {
          typeAtt.setType(StandardTokenizerImpl.TOKEN_TYPES[tokenType]);
        }
        return true;
      } else
        // When we skip a too-long term, we still increment the
        // position increment
        posIncr++;
    }
  }

  @Override
  public final void end() {
    // set final offset
    int finalOffset = correctOffset(scanner.yychar() + scanner.yylength());
    offsetAtt.setOffset(finalOffset, finalOffset);
  }

  /*
   * (non-Javadoc)
   *
   * @see org.apache.lucene.analysis.TokenStream#reset()
   */
  @Override
  public void reset() throws IOException {
    super.reset();
    scanner.yyreset(input);
  }

  @Override
  public void reset(Reader reader) throws IOException {
    super.reset(reader);
    reset();
  }

  /**
   * Prior to <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a>, StandardTokenizer mischaracterized as acronyms tokens like www.abc.com
   * when they should have been labeled as hosts instead.
   * @return true if StandardTokenizer now returns these tokens as Hosts, otherwise false
   *
   * @deprecated Remove in 3.X and make true the only valid value
   */
  public boolean isReplaceInvalidAcronym() {
    return replaceInvalidAcronym;
  }

  /**
   *
   * @param replaceInvalidAcronym Set to true to replace mischaracterized acronyms as HOST.
   * @deprecated Remove in 3.X and make true the only valid value
   *
   * See https://issues.apache.org/jira/browse/LUCENE-1068
   */
  public void setReplaceInvalidAcronym(boolean replaceInvalidAcronym) {
    this.replaceInvalidAcronym = replaceInvalidAcronym;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java | Java | art | 8,417 |
package org.apache.lucene.analysis.standard;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
WARNING: if you change StandardTokenizerImpl.jflex and need to regenerate
the tokenizer, only use Java 1.4 !!!
This grammar currently uses constructs (eg :digit:, :letter:) whose
meaning can vary according to the JRE used to run jflex. See
https://issues.apache.org/jira/browse/LUCENE-1126 for details.
For current backwards compatibility it is needed to support
only Java 1.4 - this will change in Lucene 3.1.
*/

import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;

%%

// Generator options: produce class StandardTokenizerImpl with an
// int-returning getNextToken() scanning method, full Unicode input,
// packed transition tables, and character counting (yychar) enabled.
%class StandardTokenizerImpl
%unicode
%integer
%function getNextToken
%pack
%char

%{

// Token-type ids returned by getNextToken(); aliases of the constants in
// StandardTokenizer so the generated scanner and its consumer stay in sync.
public static final int ALPHANUM = StandardTokenizer.ALPHANUM;
public static final int APOSTROPHE = StandardTokenizer.APOSTROPHE;
public static final int ACRONYM = StandardTokenizer.ACRONYM;
public static final int COMPANY = StandardTokenizer.COMPANY;
public static final int EMAIL = StandardTokenizer.EMAIL;
public static final int HOST = StandardTokenizer.HOST;
public static final int NUM = StandardTokenizer.NUM;
public static final int CJ = StandardTokenizer.CJ;

/**
 * @deprecated this solves a bug where HOSTs that end with '.' are identified
 * as ACRONYMs.
 */
public static final int ACRONYM_DEP = StandardTokenizer.ACRONYM_DEP;

public static final String [] TOKEN_TYPES = StandardTokenizer.TOKEN_TYPES;

/** Character offset of the current token within the scanned input. */
public final int yychar()
{
    return yychar;
}

/**
 * Fills Lucene token with the current token text.
 */
final void getText(Token t) {
  t.setTermBuffer(zzBuffer, zzStartRead, zzMarkedPos-zzStartRead);
}

/**
 * Fills TermAttribute with the current token text.
 */
final void getText(TermAttribute t) {
  t.setTermBuffer(zzBuffer, zzStartRead, zzMarkedPos-zzStartRead);
}

%}

THAI = [\u0E00-\u0E59]

// basic word: a sequence of digits & letters (includes Thai to enable ThaiAnalyzer to function)
ALPHANUM = ({LETTER}|{THAI}|[:digit:])+

// internal apostrophes: O'Reilly, you're, O'Reilly's
// use a post-filter to remove possessives
APOSTROPHE = {ALPHA} ("'" {ALPHA})+

// acronyms: U.S.A., I.B.M., etc.
// use a post-filter to remove dots
ACRONYM = {LETTER} "." ({LETTER} ".")+

ACRONYM_DEP = {ALPHANUM} "." ({ALPHANUM} ".")+

// company names like AT&T and Excite@Home.
COMPANY = {ALPHA} ("&"|"@") {ALPHA}

// email addresses
EMAIL = {ALPHANUM} (("."|"-"|"_") {ALPHANUM})* "@" {ALPHANUM} (("."|"-") {ALPHANUM})+

// hostname
HOST = {ALPHANUM} ((".") {ALPHANUM})+

// floating point, serial, model numbers, ip addresses, etc.
// every other segment must have at least one digit
NUM = ({ALPHANUM} {P} {HAS_DIGIT}
| {HAS_DIGIT} {P} {ALPHANUM}
| {ALPHANUM} ({P} {HAS_DIGIT} {P} {ALPHANUM})+
| {HAS_DIGIT} ({P} {ALPHANUM} {P} {HAS_DIGIT})+
| {ALPHANUM} {P} {HAS_DIGIT} ({P} {ALPHANUM} {P} {HAS_DIGIT})+
| {HAS_DIGIT} {P} {ALPHANUM} ({P} {HAS_DIGIT} {P} {ALPHANUM})+)

// punctuation
P = ("_"|"-"|"/"|"."|",")

// at least one digit
HAS_DIGIT = ({LETTER}|[:digit:])* [:digit:] ({LETTER}|[:digit:])*

ALPHA = ({LETTER})+

// From the JFlex manual: "the expression that matches everything of <a> not matched by <b> is !(!<a>|<b>)"
LETTER = !(![:letter:]|{CJ})

// Chinese and Japanese (but NOT Korean, which is included in [:letter:])
CJ = [\u3100-\u312f\u3040-\u309F\u30A0-\u30FF\u31F0-\u31FF\u3300-\u337f\u3400-\u4dbf\u4e00-\u9fff\uf900-\ufaff\uff65-\uff9f]

WHITESPACE = \r\n | [ \r\n\t\f]

%%

// Lexical rules: each action returns the token-type id for the matched text.
{ALPHANUM} { return ALPHANUM; }
{APOSTROPHE} { return APOSTROPHE; }
{ACRONYM} { return ACRONYM; }
{COMPANY} { return COMPANY; }
{EMAIL} { return EMAIL; }
{HOST} { return HOST; }
{NUM} { return NUM; }
{CJ} { return CJ; }
{ACRONYM_DEP} { return ACRONYM_DEP; }

/** Ignore the rest */
. | {WHITESPACE} { /* ignore */ }
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.analysis;
import java.io.IOException;
import java.io.Reader;
import java.util.LinkedList;
/**
 * Simplistic {@link CharFilter} that applies the mappings
 * contained in a {@link NormalizeCharMap} to the character
 * stream, correcting the offsets for any changes it makes.
 */
public class MappingCharFilter extends BaseCharFilter {

  private final NormalizeCharMap normMap;
  // Pushback buffer: characters read ahead during a failed (or partial)
  // trie match are returned here so they can be re-read in order.
  private LinkedList<Character> buffer;
  // Replacement text currently being emitted, or null when none is pending.
  private String replacement;
  // Index of the next char of 'replacement' to emit.
  private int charPointer;
  // Total number of characters consumed from the underlying stream; used as
  // the cursor when recording offset corrections.
  private int nextCharCounter;

  /** Default constructor that takes a {@link CharStream}. */
  public MappingCharFilter(NormalizeCharMap normMap, CharStream in) {
    super(in);
    this.normMap = normMap;
  }

  /** Easy-use constructor that takes a {@link Reader}. */
  public MappingCharFilter(NormalizeCharMap normMap, Reader in) {
    super(CharReader.get(in));
    this.normMap = normMap;
  }

  @Override
  public int read() throws IOException {
    while(true) {
      // First drain any replacement text produced by an earlier match.
      if (replacement != null && charPointer < replacement.length()) {
        return replacement.charAt(charPointer++);
      }
      int firstChar = nextChar();
      if (firstChar == -1) return -1;
      NormalizeCharMap nm = normMap.submap != null ?
        normMap.submap.get(Character.valueOf((char) firstChar)) : null;
      if (nm == null) return firstChar;
      // Try to extend the match beyond the first character.
      NormalizeCharMap result = match(nm);
      if (result == null) return firstChar;
      replacement = result.normStr;
      charPointer = 0;
      // result.diff encodes the length change introduced by the mapping
      // (defined in NormalizeCharMap, not visible in this chunk); record
      // corrections so correctOffset() can map offsets back to the input.
      if (result.diff != 0) {
        int prevCumulativeDiff = getLastCumulativeDiff();
        if (result.diff < 0) {
          // Replacement longer than the matched text: one correction entry
          // per extra output character.
          for(int i = 0; i < -result.diff ; i++)
            addOffCorrectMap(nextCharCounter + i - prevCumulativeDiff, prevCumulativeDiff - 1 - i);
        } else {
          // Replacement shorter than the matched text: a single entry.
          addOffCorrectMap(nextCharCounter - result.diff - prevCumulativeDiff, prevCumulativeDiff + result.diff);
        }
      }
    }
  }

  // Returns the next character, preferring pushed-back characters, and
  // advances the consumed-character counter.
  private int nextChar() throws IOException {
    nextCharCounter++;
    if (buffer != null && !buffer.isEmpty()) {
      return buffer.removeFirst().charValue();
    }
    return input.read();
  }

  // Pushes back one look-ahead character (LIFO), undoing the counter
  // increment performed by nextChar().
  private void pushChar(int c) {
    nextCharCounter--;
    if(buffer == null)
      buffer = new LinkedList<Character>();
    buffer.addFirst(Character.valueOf((char) c));
  }

  // Appends a character to the end of the pushback buffer without touching
  // the counter; used only by read(char[], int, int) to pre-load input.
  private void pushLastChar(int c) {
    if (buffer == null) {
      buffer = new LinkedList<Character>();
    }
    buffer.addLast(Character.valueOf((char) c));
  }

  /**
   * Recursively finds the longest match in the mapping trie starting from
   * {@code map}. Characters consumed beyond the returned match are pushed
   * back. Returns null when no node carrying a replacement was reached.
   */
  private NormalizeCharMap match(NormalizeCharMap map) throws IOException {
    NormalizeCharMap result = null;
    if (map.submap != null) {
      int chr = nextChar();
      if (chr != -1) {
        NormalizeCharMap subMap = map.submap.get(Character.valueOf((char) chr));
        if (subMap != null) {
          result = match(subMap);
        }
        if (result == null) {
          pushChar(chr);
        }
      }
    }
    if (result == null && map.normStr != null) {
      result = map;
    }
    return result;
  }

  @Override
  public int read(char[] cbuf, int off, int len) throws IOException {
    // Bulk read: pull up to len raw chars into the pushback buffer, then
    // route each output char through the single-char read() above so the
    // mapping/offset logic is applied in exactly one place.
    char[] tmp = new char[len];
    int l = input.read(tmp, 0, len);
    if (l != -1) {
      for(int i = 0; i < l; i++)
        pushLastChar(tmp[i]);
    }
    l = 0;
    for(int i = off; i < off + len; i++) {
      int c = read();
      if (c == -1) break;
      cbuf[i] = (char) c;
      l++;
    }
    return l == 0 ? -1 : l;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/MappingCharFilter.java | Java | art | 4,213 |
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.Closeable;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.util.Attribute;
import org.apache.lucene.util.AttributeImpl;
import org.apache.lucene.util.AttributeSource;
/**
* A <code>TokenStream</code> enumerates the sequence of tokens, either from
* {@link Field}s of a {@link Document} or from query text.
* <p>
* This is an abstract class; concrete subclasses are:
* <ul>
* <li>{@link Tokenizer}, a <code>TokenStream</code> whose input is a Reader; and
* <li>{@link TokenFilter}, a <code>TokenStream</code> whose input is another
* <code>TokenStream</code>.
* </ul>
* A new <code>TokenStream</code> API has been introduced with Lucene 2.9. This API
* has moved from being {@link Token}-based to {@link Attribute}-based. While
* {@link Token} still exists in 2.9 as a convenience class, the preferred way
* to store the information of a {@link Token} is to use {@link AttributeImpl}s.
* <p>
* <code>TokenStream</code> now extends {@link AttributeSource}, which provides
* access to all of the token {@link Attribute}s for the <code>TokenStream</code>.
* Note that only one instance per {@link AttributeImpl} is created and reused
* for every token. This approach reduces object creation and allows local
* caching of references to the {@link AttributeImpl}s. See
* {@link #incrementToken()} for further details.
* <p>
* <b>The workflow of the new <code>TokenStream</code> API is as follows:</b>
* <ol>
* <li>Instantiation of <code>TokenStream</code>/{@link TokenFilter}s which add/get
* attributes to/from the {@link AttributeSource}.
* <li>The consumer calls {@link TokenStream#reset()}.
* <li>The consumer retrieves attributes from the stream and stores local
* references to all attributes it wants to access.
* <li>The consumer calls {@link #incrementToken()} until it returns false
* consuming the attributes after each call.
* <li>The consumer calls {@link #end()} so that any end-of-stream operations
* can be performed.
* <li>The consumer calls {@link #close()} to release any resource when finished
* using the <code>TokenStream</code>.
* </ol>
* To make sure that filters and consumers know which attributes are available,
* the attributes must be added during instantiation. Filters and consumers are
* not required to check for availability of attributes in
* {@link #incrementToken()}.
* <p>
* You can find some example code for the new API in the analysis package level
* Javadoc.
* <p>
* Sometimes it is desirable to capture a current state of a <code>TokenStream</code>,
* e.g., for buffering purposes (see {@link CachingTokenFilter},
* {@link TeeSinkTokenFilter}). For this usecase
* {@link AttributeSource#captureState} and {@link AttributeSource#restoreState}
* can be used.
*/
public abstract class TokenStream extends AttributeSource implements Closeable {

  /**
   * A TokenStream using the default attribute factory.
   */
  protected TokenStream() {
    super();
  }

  /**
   * A TokenStream that uses the same attributes as the supplied one,
   * so both streams share a single set of attribute instances.
   */
  protected TokenStream(AttributeSource input) {
    super(input);
  }

  /**
   * A TokenStream using the supplied AttributeFactory for creating new {@link Attribute} instances.
   */
  protected TokenStream(AttributeFactory factory) {
    super(factory);
  }

  /**
   * Consumers (i.e., {@link IndexWriter}) use this method to advance the stream to
   * the next token. Implementing classes must implement this method and update
   * the appropriate {@link AttributeImpl}s with the attributes of the next
   * token.
   * <P>
   * The producer must make no assumptions about the attributes after the method
   * has been returned: the caller may arbitrarily change it. If the producer
   * needs to preserve the state for subsequent calls, it can use
   * {@link #captureState} to create a copy of the current attribute state.
   * <p>
   * This method is called for every token of a document, so an efficient
   * implementation is crucial for good performance. To avoid calls to
   * {@link #addAttribute(Class)} and {@link #getAttribute(Class)},
   * references to all {@link AttributeImpl}s that this stream uses should be
   * retrieved during instantiation.
   * <p>
   * To ensure that filters and consumers know which attributes are available,
   * the attributes must be added during instantiation. Filters and consumers
   * are not required to check for availability of attributes in
   * {@link #incrementToken()}.
   *
   * @return false for end of stream; true otherwise
   */
  public abstract boolean incrementToken() throws IOException;

  /**
   * This method is called by the consumer after the last token has been
   * consumed, after {@link #incrementToken()} returned <code>false</code>
   * (using the new <code>TokenStream</code> API). Streams implementing the old API
   * should upgrade to use this feature.
   * <p/>
   * This method can be used to perform any end-of-stream operations, such as
   * setting the final offset of a stream. The final offset of a stream might
   * differ from the offset of the last token eg in case one or more whitespaces
   * followed after the last token, but a {@link WhitespaceTokenizer} was used.
   *
   * @throws IOException
   */
  public void end() throws IOException {
    // do nothing by default; subclasses override to e.g. set a final offset
  }

  /**
   * Resets this stream to the beginning. This is an optional operation, so
   * subclasses may or may not implement this method. {@link #reset()} is not needed for
   * the standard indexing process. However, if the tokens of a
   * <code>TokenStream</code> are intended to be consumed more than once, it is
   * necessary to implement {@link #reset()}. Note that if your TokenStream
   * caches tokens and feeds them back again after a reset, it is imperative
   * that you clone the tokens when you store them away (on the first pass) as
   * well as when you return them (on future passes after {@link #reset()}).
   */
  public void reset() throws IOException {}

  /** Releases resources associated with this stream. No-op by default. */
  public void close() throws IOException {}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/TokenStream.java | Java | art | 7,084 |
package org.apache.lucene.analysis;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A filter that replaces accented characters in the ISO Latin 1 character set
* (ISO-8859-1) by their unaccented equivalent. The case will not be altered.
* <p>
* For instance, 'à' will be replaced by 'a'.
* <p>
*
* @deprecated If you build a new index, use {@link ASCIIFoldingFilter}
* which covers a superset of Latin 1.
* This class is included for use with existing
* indexes and will be removed in a future release (possibly Lucene 4.0).
*/
public final class ISOLatin1AccentFilter extends TokenFilter {

  /** Scratch buffer holding the folded term text; grown (never shrunk) on demand. */
  private char[] output = new char[256];
  /** Number of valid chars in {@link #output} after a {@link #removeAccents} call. */
  private int outputPos;
  private TermAttribute termAtt;

  public ISOLatin1AccentFilter(TokenStream input) {
    super(input);
    termAtt = addAttribute(TermAttribute.class);
  }

  /**
   * Advances to the next token, replacing its term text with the
   * accent-folded form when (and only when) it contains at least one
   * character in the folded range.
   */
  @Override
  public final boolean incrementToken() throws java.io.IOException {
    if (!input.incrementToken())
      return false;
    final char[] buffer = termAtt.termBuffer();
    final int length = termAtt.termLength();
    // Quick scan: only rewrite the term if some character actually needs
    // folding; otherwise return the token as-is.
    for (int i = 0; i < length; i++) {
      final char c = buffer[i];
      if (c >= '\u00c0' && c <= '\uFB06') {
        removeAccents(buffer, length);
        termAtt.setTermBuffer(output, 0, outputPos);
        break;
      }
    }
    return true;
  }

  /**
   * Folds accented ISO Latin-1 characters (plus a few Latin ligatures) in
   * {@code input[0..length)} into unaccented equivalents, writing the result
   * into {@link #output}; {@link #outputPos} holds the result length.
   *
   * @param input source character buffer (not modified)
   * @param length number of valid chars in {@code input}
   */
  public final void removeAccents(char[] input, int length) {
    // Worst case: every char expands to two output chars (e.g. Æ -> AE).
    final int maxSizeNeeded = 2*length;
    int size = output.length;
    while (size < maxSizeNeeded)
      size *= 2;
    if (size != output.length)
      output = new char[size];
    outputPos = 0;
    // Fix: the original kept two loop counters (i and pos) that were always
    // equal; a single index is used here.
    for (int i = 0; i < length; i++) {
      final char c = input[i];
      // Quick test: characters outside the folded range pass through.
      if (c < '\u00c0' || c > '\uFB06') {
        output[outputPos++] = c;
        continue;
      }
      switch (c) {
        case '\u00C0' : // À
        case '\u00C1' : // Á
        case '\u00C2' : // Â
        case '\u00C3' : // Ã
        case '\u00C4' : // Ä
        case '\u00C5' : // Å
          output[outputPos++] = 'A';
          break;
        case '\u00C6' : // Æ
          output[outputPos++] = 'A';
          output[outputPos++] = 'E';
          break;
        case '\u00C7' : // Ç
          output[outputPos++] = 'C';
          break;
        case '\u00C8' : // È
        case '\u00C9' : // É
        case '\u00CA' : // Ê
        case '\u00CB' : // Ë
          output[outputPos++] = 'E';
          break;
        case '\u00CC' : // Ì
        case '\u00CD' : // Í
        case '\u00CE' : // Î
        case '\u00CF' : // Ï
          output[outputPos++] = 'I';
          break;
        case '\u0132' : // IJ
          output[outputPos++] = 'I';
          output[outputPos++] = 'J';
          break;
        case '\u00D0' : // Ð
          output[outputPos++] = 'D';
          break;
        case '\u00D1' : // Ñ
          output[outputPos++] = 'N';
          break;
        case '\u00D2' : // Ò
        case '\u00D3' : // Ó
        case '\u00D4' : // Ô
        case '\u00D5' : // Õ
        case '\u00D6' : // Ö
        case '\u00D8' : // Ø
          output[outputPos++] = 'O';
          break;
        case '\u0152' : // Œ
          output[outputPos++] = 'O';
          output[outputPos++] = 'E';
          break;
        case '\u00DE' : // Þ
          output[outputPos++] = 'T';
          output[outputPos++] = 'H';
          break;
        case '\u00D9' : // Ù
        case '\u00DA' : // Ú
        case '\u00DB' : // Û
        case '\u00DC' : // Ü
          output[outputPos++] = 'U';
          break;
        case '\u00DD' : // Ý
        case '\u0178' : // Ÿ
          output[outputPos++] = 'Y';
          break;
        case '\u00E0' : // à
        case '\u00E1' : // á
        case '\u00E2' : // â
        case '\u00E3' : // ã
        case '\u00E4' : // ä
        case '\u00E5' : // å
          output[outputPos++] = 'a';
          break;
        case '\u00E6' : // æ
          output[outputPos++] = 'a';
          output[outputPos++] = 'e';
          break;
        case '\u00E7' : // ç
          output[outputPos++] = 'c';
          break;
        case '\u00E8' : // è
        case '\u00E9' : // é
        case '\u00EA' : // ê
        case '\u00EB' : // ë
          output[outputPos++] = 'e';
          break;
        case '\u00EC' : // ì
        case '\u00ED' : // í
        case '\u00EE' : // î
        case '\u00EF' : // ï
          output[outputPos++] = 'i';
          break;
        case '\u0133' : // ij
          output[outputPos++] = 'i';
          output[outputPos++] = 'j';
          break;
        case '\u00F0' : // ð
          output[outputPos++] = 'd';
          break;
        case '\u00F1' : // ñ
          output[outputPos++] = 'n';
          break;
        case '\u00F2' : // ò
        case '\u00F3' : // ó
        case '\u00F4' : // ô
        case '\u00F5' : // õ
        case '\u00F6' : // ö
        case '\u00F8' : // ø
          output[outputPos++] = 'o';
          break;
        case '\u0153' : // œ
          output[outputPos++] = 'o';
          output[outputPos++] = 'e';
          break;
        case '\u00DF' : // ß
          output[outputPos++] = 's';
          output[outputPos++] = 's';
          break;
        case '\u00FE' : // þ
          output[outputPos++] = 't';
          output[outputPos++] = 'h';
          break;
        case '\u00F9' : // ù
        case '\u00FA' : // ú
        case '\u00FB' : // û
        case '\u00FC' : // ü
          output[outputPos++] = 'u';
          break;
        case '\u00FD' : // ý
        case '\u00FF' : // ÿ
          output[outputPos++] = 'y';
          break;
        case '\uFB00': // ff
          output[outputPos++] = 'f';
          output[outputPos++] = 'f';
          break;
        case '\uFB01': // fi
          output[outputPos++] = 'f';
          output[outputPos++] = 'i';
          break;
        case '\uFB02': // fl
          output[outputPos++] = 'f';
          output[outputPos++] = 'l';
          break;
        // \uFB03 (ffi) and \uFB04 (ffl) are intentionally NOT expanded:
        // a three-char expansion would break the 2*length size bound above.
        case '\uFB05': // ſt
          output[outputPos++] = 'f';
          output[outputPos++] = 't';
          break;
        case '\uFB06': // st
          output[outputPos++] = 's';
          output[outputPos++] = 't';
          break;
        default :
          output[outputPos++] = c;
          break;
      }
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/ISOLatin1AccentFilter.java | Java | art | 7,959 |
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
/**
 * A TokenFilter is a TokenStream whose input is another TokenStream.
 * <p>
 * This is an abstract class; subclasses must override {@link #incrementToken()}.
 *
 * @see TokenStream
 */
public abstract class TokenFilter extends TokenStream {
  /** The source of tokens for this filter. */
  protected final TokenStream input;

  /** Construct a token stream filtering the given input.
   * The super call shares the input stream's attribute source, so filter and
   * input operate on the same attribute instances. */
  protected TokenFilter(TokenStream input) {
    super(input);
    this.input = input;
  }

  /** Performs end-of-stream operations, if any, and calls then <code>end()</code> on the
   * input TokenStream.<p/>
   * <b>NOTE:</b> Be sure to call <code>super.end()</code> first when overriding this method.*/
  @Override
  public void end() throws IOException {
    input.end();
  }

  /** Close the input TokenStream. */
  @Override
  public void close() throws IOException {
    input.close();
  }

  /** Reset the filter as well as the input TokenStream. */
  @Override
  public void reset() throws IOException {
    input.reset();
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/TokenFilter.java | Java | art | 1,897 |
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import java.util.Set;
import java.util.List;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.queryParser.QueryParser; // for javadoc
import org.apache.lucene.util.Version;
/**
 * A {@link TokenFilter} that drops every token whose term appears in a
 * supplied set of stop words.
 */
public final class StopFilter extends TokenFilter {

  private final CharArraySet stopWords;
  private boolean enablePositionIncrements = false;

  private TermAttribute termAtt;
  private PositionIncrementAttribute posIncrAtt;

  /**
   * Construct a token stream filtering the given input.
   * If <code>stopWords</code> is already a {@link CharArraySet} (true if
   * <code>makeStopSet()</code> built it), it is used directly and
   * <code>ignoreCase</code> is ignored, because a <code>CharArraySet</code>
   * controls case sensitivity itself. Otherwise a new
   * <code>CharArraySet</code> is created from it, honouring
   * <code>ignoreCase</code>.
   *
   * @param enablePositionIncrements true if token positions should record the removed stop words
   * @param input Input TokenStream
   * @param stopWords A Set of Strings or char[] or any other toString()-able set representing the stopwords
   * @param ignoreCase if true, all words are lower cased first
   */
  public StopFilter(boolean enablePositionIncrements, TokenStream input, Set<?> stopWords, boolean ignoreCase)
  {
    super(input);
    if (stopWords instanceof CharArraySet) {
      this.stopWords = (CharArraySet) stopWords;
    } else {
      CharArraySet copy = new CharArraySet(stopWords.size(), ignoreCase);
      copy.addAll(stopWords);
      this.stopWords = copy;
    }
    this.enablePositionIncrements = enablePositionIncrements;
    termAtt = addAttribute(TermAttribute.class);
    posIncrAtt = addAttribute(PositionIncrementAttribute.class);
  }

  /**
   * Constructs a filter which removes from the input TokenStream every
   * word contained in the given Set. Case-sensitive matching is used.
   *
   * @param enablePositionIncrements true if token positions should record the removed stop words
   * @param in Input stream
   * @param stopWords A Set of Strings or char[] or any other toString()-able set representing the stopwords
   * @see #makeStopSet(java.lang.String[])
   */
  public StopFilter(boolean enablePositionIncrements, TokenStream in, Set<?> stopWords) {
    this(enablePositionIncrements, in, stopWords, false);
  }

  /**
   * Builds a case-sensitive stop-word Set from an array of words, suitable
   * for passing to the StopFilter constructor. Build it once per Analyzer
   * so the construction cost is paid only once.
   *
   * @see #makeStopSet(java.lang.String[], boolean) passing false to ignoreCase
   */
  public static final Set<Object> makeStopSet(String... stopWords) {
    return makeStopSet(stopWords, false);
  }

  /**
   * Builds a case-sensitive stop-word Set from a List of words, suitable
   * for passing to the StopFilter constructor. Build it once per Analyzer
   * so the construction cost is paid only once.
   *
   * @param stopWords A List of Strings or char[] or any other toString()-able list representing the stopwords
   * @return A Set ({@link CharArraySet}) containing the words
   * @see #makeStopSet(java.lang.String[], boolean) passing false to ignoreCase
   */
  public static final Set<Object> makeStopSet(List<?> stopWords) {
    return makeStopSet(stopWords, false);
  }

  /**
   * Builds a stop-word Set from an array of words.
   *
   * @param stopWords An array of stopwords
   * @param ignoreCase If true, all words are lower cased first.
   * @return a Set containing the words
   */
  public static final Set<Object> makeStopSet(String[] stopWords, boolean ignoreCase) {
    CharArraySet result = new CharArraySet(stopWords.length, ignoreCase);
    result.addAll(Arrays.asList(stopWords));
    return result;
  }

  /**
   * Builds a stop-word Set from a List of words.
   *
   * @param stopWords A List of Strings or char[] or any other toString()-able list representing the stopwords
   * @param ignoreCase if true, all words are lower cased first
   * @return A Set ({@link CharArraySet}) containing the words
   */
  public static final Set<Object> makeStopSet(List<?> stopWords, boolean ignoreCase){
    CharArraySet result = new CharArraySet(stopWords.size(), ignoreCase);
    result.addAll(stopWords);
    return result;
  }

  /**
   * Advances past stop words and positions the stream on the next token
   * whose term is not in the stop set; returns false at end of stream.
   */
  @Override
  public final boolean incrementToken() throws IOException {
    int skipped = 0;
    for (;;) {
      if (!input.incrementToken()) {
        // End of stream reached; only stop words (if anything) remained.
        return false;
      }
      if (stopWords.contains(termAtt.termBuffer(), 0, termAtt.termLength())) {
        // Accumulate the increments of dropped tokens so positions can be
        // preserved when enablePositionIncrements is on.
        skipped += posIncrAtt.getPositionIncrement();
        continue;
      }
      if (enablePositionIncrements) {
        posIncrAtt.setPositionIncrement(posIncrAtt.getPositionIncrement() + skipped);
      }
      return true;
    }
  }

  /**
   * Returns the version-dependent default for enablePositionIncrements.
   * Analyzers that embed StopFilter use this when creating one. Prior to
   * 2.9 this returns false; on 2.9 or later it returns true.
   */
  public static boolean getEnablePositionIncrementsVersionDefault(Version matchVersion) {
    return matchVersion.onOrAfter(Version.LUCENE_29);
  }

  /**
   * @see #setEnablePositionIncrements(boolean).
   */
  public boolean getEnablePositionIncrements() {
    return enablePositionIncrements;
  }

  /**
   * If <code>true</code>, this StopFilter preserves the positions of the
   * incoming tokens: when a token is stopped (omitted), the position
   * increment of the following token is raised accordingly. Generally
   * <code>true</code> is best, as no positional information is lost during
   * indexing.
   *
   * <p> <b>NOTE</b>: be sure to also
   * set {@link QueryParser#setEnablePositionIncrements} if
   * you use QueryParser to create queries.
   */
  public void setEnablePositionIncrements(boolean enable) {
    this.enablePositionIncrements = enable;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/StopFilter.java | Java | art | 7,255 |
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.Reader;
import java.io.IOException;
import java.io.Closeable;
import java.lang.reflect.Method;
import org.apache.lucene.util.CloseableThreadLocal;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.document.Fieldable;
/** An Analyzer builds TokenStreams, which analyze text. It thus represents a
* policy for extracting index terms from text.
* <p>
* Typical implementations first build a Tokenizer, which breaks the stream of
* characters from the Reader into raw Tokens. One or more TokenFilters may
* then be applied to the output of the Tokenizer.
*/
public abstract class Analyzer implements Closeable {
  /** Creates a TokenStream which tokenizes all the text in the provided
   * Reader. Must be able to handle null field name for
   * backward compatibility.
   */
  public abstract TokenStream tokenStream(String fieldName, Reader reader);

  /** Creates a TokenStream that is allowed to be re-used
   * from the previous time that the same thread called
   * this method. Callers that do not need to use more
   * than one TokenStream at the same time from this
   * analyzer should use this method for better
   * performance.
   */
  public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException {
    return tokenStream(fieldName, reader);
  }

  // Per-thread storage for a previously created TokenStream; set to null by
  // close(), which is how the "closed" state is detected below.
  private CloseableThreadLocal<Object> tokenStreams = new CloseableThreadLocal<Object>();

  /** Used by Analyzers that implement reusableTokenStream
   * to retrieve previously saved TokenStreams for re-use
   * by the same thread. */
  protected Object getPreviousTokenStream() {
    try {
      return tokenStreams.get();
    } catch (NullPointerException npe) {
      // close() nulls tokenStreams, so an NPE here distinguishes
      // "this Analyzer was closed" from an unrelated NPE.
      if (tokenStreams == null) {
        throw new AlreadyClosedException("this Analyzer is closed");
      } else {
        throw npe;
      }
    }
  }

  /** Used by Analyzers that implement reusableTokenStream
   * to save a TokenStream for later re-use by the same
   * thread. */
  protected void setPreviousTokenStream(Object obj) {
    try {
      tokenStreams.set(obj);
    } catch (NullPointerException npe) {
      // Same closed-detection trick as in getPreviousTokenStream().
      if (tokenStreams == null) {
        throw new AlreadyClosedException("this Analyzer is closed");
      } else {
        throw npe;
      }
    }
  }

  /** @deprecated */
  protected boolean overridesTokenStreamMethod = false;

  /** @deprecated This is only present to preserve
   * back-compat of classes that subclass a core analyzer
   * and override tokenStream but not reusableTokenStream */
  protected void setOverridesTokenStreamMethod(Class<? extends Analyzer> baseClass) {
    try {
      Method m = this.getClass().getMethod("tokenStream", String.class, Reader.class);
      overridesTokenStreamMethod = m.getDeclaringClass() != baseClass;
    } catch (NoSuchMethodException nsme) {
      // cannot happen, as baseClass is subclass of Analyzer through generics
      overridesTokenStreamMethod = false;
    }
  }

  /**
   * Invoked before indexing a Fieldable instance if
   * terms have already been added to that field. This allows custom
   * analyzers to place an automatic position increment gap between
   * Fieldable instances using the same field name. The default value
   * position increment gap is 0. With a 0 position increment gap and
   * the typical default token position increment of 1, all terms in a field,
   * including across Fieldable instances, are in successive positions, allowing
   * exact PhraseQuery matches, for instance, across Fieldable instance boundaries.
   *
   * @param fieldName Fieldable name being indexed.
   * @return position increment gap, added to the next token emitted from {@link #tokenStream(String,Reader)}
   */
  public int getPositionIncrementGap(String fieldName) {
    return 0;
  }

  /**
   * Just like {@link #getPositionIncrementGap}, except for
   * Token offsets instead. By default this returns 1 for
   * tokenized fields (as if the fields were joined with an
   * extra space character), and 0 for un-tokenized
   * fields. This method is only called if the field
   * produced at least one token for indexing.
   *
   * @param field the field just indexed
   * @return offset gap, added to the next token emitted from {@link #tokenStream(String,Reader)}
   */
  public int getOffsetGap(Fieldable field) {
    if (field.isTokenized())
      return 1;
    else
      return 0;
  }

  /** Frees persistent resources used by this Analyzer */
  public void close() {
    tokenStreams.close();
    tokenStreams = null; // marks this Analyzer as closed
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/Analyzer.java | Java | art | 5,402 |
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
/**
 * A TokenFilter that discards tokens whose term text is shorter than
 * {@code min} or longer than {@code max} characters (both bounds inclusive).
 */
public final class LengthFilter extends TokenFilter {
  final int min;
  final int max;
  private TermAttribute termAtt;
  /**
   * Builds a filter over {@code in} that keeps only tokens whose term
   * length lies within the inclusive range [min, max].
   */
  public LengthFilter(TokenStream in, int min, int max)
  {
    super(in);
    this.min = min;
    this.max = max;
    termAtt = addAttribute(TermAttribute.class);
  }
  /**
   * Advances the stream to the next token of acceptable length.
   * @return true if such a token was found, false once the input is exhausted
   */
  @Override
  public final boolean incrementToken() throws IOException {
    for (;;) {
      if (!input.incrementToken()) {
        return false; // end of stream -- nothing acceptable left
      }
      final int length = termAtt.termLength();
      if (length >= min && length <= max) {
        return true; // within bounds: keep this token
      }
      // out of bounds: drop the token and keep scanning
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/LengthFilter.java | Java | art | 1,880 |
package org.apache.lucene.analysis.tokenattributes;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.Serializable;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.util.AttributeImpl;
/** Default implementation of {@link PositionIncrementAttribute}: stores the
 * distance of this token's position from the previous Token in a
 * {@link TokenStream}, used in phrase searching.
 *
 * <p>The default value is one.
 *
 * <p>Typical uses:<ul>
 *
 * <li>A value of zero places multiple terms at the same position (e.g. for
 * several stems of one word, all stems after the first get increment zero
 * so phrase searches match any of them; repeating a token at increment zero
 * can also boost matches on it).
 *
 * <li>A value greater than one inhibits exact phrase matches, e.g. a stop
 * filter may set the increment to the number of stop words removed before
 * each kept word, so phrase queries only match with no intervening stop
 * words.
 *
 * </ul>
 */
public class PositionIncrementAttributeImpl extends AttributeImpl implements PositionIncrementAttribute, Cloneable, Serializable {
  // Distance from the previous token's position; defaults to one.
  private int positionIncrement = 1;
  /** Set the position increment. The default value is one.
   *
   * @param positionIncrement the distance from the prior term; must not be negative
   * @throws IllegalArgumentException if the increment is negative
   */
  public void setPositionIncrement(int positionIncrement) {
    if (positionIncrement < 0) {
      throw new IllegalArgumentException
        ("Increment must be zero or greater: " + positionIncrement);
    }
    this.positionIncrement = positionIncrement;
  }
  /** Returns the position increment of this Token.
   * @see #setPositionIncrement
   */
  public int getPositionIncrement() {
    return positionIncrement;
  }
  /** Resets the increment back to its default of one. */
  @Override
  public void clear() {
    this.positionIncrement = 1;
  }
  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    }
    if (!(other instanceof PositionIncrementAttributeImpl)) {
      return false;
    }
    final PositionIncrementAttributeImpl that = (PositionIncrementAttributeImpl) other;
    return that.positionIncrement == this.positionIncrement;
  }
  /** The increment itself serves as the hash code. */
  @Override
  public int hashCode() {
    return positionIncrement;
  }
  /** Copies the increment into another PositionIncrementAttribute. */
  @Override
  public void copyTo(AttributeImpl target) {
    ((PositionIncrementAttribute) target).setPositionIncrement(positionIncrement);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttributeImpl.java | Java | art | 3,423 |
package org.apache.lucene.analysis.tokenattributes;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.util.Attribute;
/**
 * This attribute can be used to pass different flags down the {@link Tokenizer} chain,
 * eg from one TokenFilter to another one. Each flag is one bit of the int value.
 */
public interface FlagsAttribute extends Attribute {
  /**
   * EXPERIMENTAL: While we think this is here to stay, we may want to change it to be a long.
   * <p/>
   *
   * Get the bitset for any bits that have been set. This is completely distinct from {@link TypeAttribute#type()}, although they do share similar purposes.
   * The flags can be used to encode information about the token for use by other {@link org.apache.lucene.analysis.TokenFilter}s.
   *
   *
   * @return The bits
   */
  public int getFlags();
  /**
   * Sets the flag bits for this token, replacing any previous value.
   * @see #getFlags()
   */
  public void setFlags(int flags);
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/tokenattributes/FlagsAttribute.java | Java | art | 1,679 |
package org.apache.lucene.analysis.tokenattributes;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.Serializable;
import org.apache.lucene.util.AttributeImpl;
/**
 * Default implementation of {@link OffsetAttribute}: the start and end
 * character offset of a Token within the source text.
 */
public class OffsetAttributeImpl extends AttributeImpl implements OffsetAttribute, Cloneable, Serializable {
  private int startOffset;
  private int endOffset;
  /** Returns this Token's starting offset, the position of the first character
      corresponding to this token in the source text.
      Note that the difference between endOffset() and startOffset() may not be
      equal to termText.length(), as the term text may have been altered by a
      stemmer or some other filter. */
  public int startOffset() {
    return startOffset;
  }
  /** Set the starting and ending offset.
      @see #startOffset()
      @see #endOffset() */
  public void setOffset(int startOffset, int endOffset) {
    this.startOffset = startOffset;
    this.endOffset = endOffset;
  }
  /** Returns this Token's ending offset, one greater than the position of the
      last character corresponding to this token in the source text. The length
      of the token in the source text is (endOffset - startOffset). */
  public int endOffset() {
    return endOffset;
  }
  /** Resets both offsets to zero. */
  @Override
  public void clear() {
    startOffset = endOffset = 0;
  }
  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    }
    if (!(other instanceof OffsetAttributeImpl)) {
      return false;
    }
    final OffsetAttributeImpl that = (OffsetAttributeImpl) other;
    return this.startOffset == that.startOffset
        && this.endOffset == that.endOffset;
  }
  @Override
  public int hashCode() {
    // Combines both offsets; same scheme as the usual 31-based mix.
    return startOffset * 31 + endOffset;
  }
  /** Copies both offsets into another OffsetAttribute. */
  @Override
  public void copyTo(AttributeImpl target) {
    ((OffsetAttribute) target).setOffset(startOffset, endOffset);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/tokenattributes/OffsetAttributeImpl.java | Java | art | 2,710 |
package org.apache.lucene.analysis.tokenattributes;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.Serializable;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.AttributeImpl;
/**
 * The term text of a Token, stored as a growable char[] buffer plus a
 * length so that filters can mutate the text without allocating a new
 * String per token.
 */
public class TermAttributeImpl extends AttributeImpl implements TermAttribute, Cloneable, Serializable {
  // Lower bound for buffer allocations. Made final: it is a constant and
  // was previously a mutable static field by mistake.
  private static final int MIN_BUFFER_SIZE = 10;
  // Lazily allocated by initTermBuffer(); may be null until first use.
  private char[] termBuffer;
  // Number of valid characters currently held in termBuffer.
  private int termLength;
  /** Returns the Token's term text.
   *
   * This method has a performance penalty
   * because the text is stored internally in a char[]. If
   * possible, use {@link #termBuffer()} and {@link
   * #termLength()} directly instead. If you really need a
   * String, use this method, which is nothing more than
   * a convenience call to <b>new String(token.termBuffer(), 0, token.termLength())</b>
   */
  public String term() {
    initTermBuffer();
    return new String(termBuffer, 0, termLength);
  }
  /** Copies the contents of buffer, starting at offset for
   * length characters, into the termBuffer array.
   * @param buffer the buffer to copy
   * @param offset the index in the buffer of the first character to copy
   * @param length the number of characters to copy
   */
  public void setTermBuffer(char[] buffer, int offset, int length) {
    growTermBuffer(length);
    System.arraycopy(buffer, offset, termBuffer, 0, length);
    termLength = length;
  }
  /** Copies the contents of buffer into the termBuffer array.
   * @param buffer the buffer to copy
   */
  public void setTermBuffer(String buffer) {
    int length = buffer.length();
    growTermBuffer(length);
    buffer.getChars(0, length, termBuffer, 0);
    termLength = length;
  }
  /** Copies the contents of buffer, starting at offset and continuing
   * for length characters, into the termBuffer array.
   * @param buffer the buffer to copy
   * @param offset the index in the buffer of the first character to copy
   * @param length the number of characters to copy
   */
  public void setTermBuffer(String buffer, int offset, int length) {
    assert offset <= buffer.length();
    assert offset + length <= buffer.length();
    growTermBuffer(length);
    buffer.getChars(offset, offset + length, termBuffer, 0);
    termLength = length;
  }
  /** Returns the internal termBuffer character array which
   * you can then directly alter. If the array is too
   * small for your token, use {@link
   * #resizeTermBuffer(int)} to increase it. After
   * altering the buffer be sure to call {@link
   * #setTermLength} to record the number of valid
   * characters that were placed into the termBuffer. */
  public char[] termBuffer() {
    initTermBuffer();
    return termBuffer;
  }
  /** Grows the termBuffer to at least size newSize, preserving the
   * existing content. Note: If the next operation is to change
   * the contents of the term buffer use
   * {@link #setTermBuffer(char[], int, int)},
   * {@link #setTermBuffer(String)}, or
   * {@link #setTermBuffer(String, int, int)}
   * to optimally combine the resize with the setting of the termBuffer.
   * @param newSize minimum size of the new termBuffer
   * @return newly created termBuffer with length &gt;= newSize
   */
  public char[] resizeTermBuffer(int newSize) {
    if (termBuffer == null) {
      // The buffer is always at least MIN_BUFFER_SIZE
      termBuffer = new char[ArrayUtil.getNextSize(newSize < MIN_BUFFER_SIZE ? MIN_BUFFER_SIZE : newSize)];
    } else {
      if (termBuffer.length < newSize) {
        // Not big enough; create a new array with slight
        // over allocation and preserve content
        final char[] newCharBuffer = new char[ArrayUtil.getNextSize(newSize)];
        System.arraycopy(termBuffer, 0, newCharBuffer, 0, termBuffer.length);
        termBuffer = newCharBuffer;
      }
    }
    return termBuffer;
  }
  /** Allocates a buffer char[] of at least newSize, without preserving the
   * existing content. Only called from places that immediately overwrite
   * the whole content, so no copy is needed.
   * @param newSize minimum size of the buffer
   */
  private void growTermBuffer(int newSize) {
    if (termBuffer == null) {
      // The buffer is always at least MIN_BUFFER_SIZE
      termBuffer = new char[ArrayUtil.getNextSize(newSize < MIN_BUFFER_SIZE ? MIN_BUFFER_SIZE : newSize)];
    } else {
      if (termBuffer.length < newSize) {
        // Not big enough; create a new array with slight
        // over allocation; old content is intentionally discarded.
        termBuffer = new char[ArrayUtil.getNextSize(newSize)];
      }
    }
  }
  // Lazily allocates the buffer the first time it is needed.
  private void initTermBuffer() {
    if (termBuffer == null) {
      termBuffer = new char[ArrayUtil.getNextSize(MIN_BUFFER_SIZE)];
      termLength = 0;
    }
  }
  /** Return number of valid characters (length of the term)
   * in the termBuffer array. */
  public int termLength() {
    return termLength;
  }
  /** Set number of valid characters (length of the term) in
   * the termBuffer array. Use this to truncate the termBuffer
   * or to synchronize with external manipulation of the termBuffer.
   * Note: to grow the size of the array,
   * use {@link #resizeTermBuffer(int)} first.
   * @param length the truncated length
   * @throws IllegalArgumentException if length exceeds the current buffer size
   */
  public void setTermLength(int length) {
    initTermBuffer();
    if (length > termBuffer.length)
      throw new IllegalArgumentException("length " + length + " exceeds the size of the termBuffer (" + termBuffer.length + ")");
    termLength = length;
  }
  @Override
  public int hashCode() {
    initTermBuffer();
    int code = termLength;
    code = code * 31 + ArrayUtil.hashCode(termBuffer, 0, termLength);
    return code;
  }
  @Override
  public void clear() {
    // Buffer is retained for reuse; only the valid length is reset.
    termLength = 0;
  }
  @Override
  public Object clone() {
    TermAttributeImpl t = (TermAttributeImpl) super.clone();
    // Do a deep clone so the copy owns its own buffer
    if (termBuffer != null) {
      t.termBuffer = (char[]) termBuffer.clone();
    }
    return t;
  }
  @Override
  public boolean equals(Object other) {
    if (other == this) {
      return true;
    }
    // Bug fix: the instanceof test used the TermAttribute interface while
    // the cast below requires this concrete class, so comparing against a
    // different TermAttribute implementation threw ClassCastException
    // instead of returning false.
    if (other instanceof TermAttributeImpl) {
      initTermBuffer();
      TermAttributeImpl o = ((TermAttributeImpl) other);
      o.initTermBuffer();
      if (termLength != o.termLength)
        return false;
      for (int i = 0; i < termLength; i++) {
        if (termBuffer[i] != o.termBuffer[i]) {
          return false;
        }
      }
      return true;
    }
    return false;
  }
  @Override
  public String toString() {
    initTermBuffer();
    return "term=" + new String(termBuffer, 0, termLength);
  }
  @Override
  public void copyTo(AttributeImpl target) {
    initTermBuffer();
    TermAttribute t = (TermAttribute) target;
    t.setTermBuffer(termBuffer, 0, termLength);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/tokenattributes/TermAttributeImpl.java | Java | art | 7,524 |
package org.apache.lucene.analysis.tokenattributes;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.util.Attribute;
/**
 * The start and end character offset of a Token within the source text.
 */
public interface OffsetAttribute extends Attribute {
  /** Returns this Token's starting offset, the position of the first character
      corresponding to this token in the source text.
      Note that the difference between endOffset() and startOffset() may not be
      equal to termText.length(), as the term text may have been altered by a
      stemmer or some other filter. */
  public int startOffset();
  /** Set the starting and ending offset.
      @see #startOffset()
      @see #endOffset() */
  public void setOffset(int startOffset, int endOffset);
  /** Returns this Token's ending offset, one greater than the position of the
      last character corresponding to this token in the source text. The length
      of the token in the source text is (endOffset - startOffset). */
  public int endOffset();
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/tokenattributes/OffsetAttribute.java | Java | art | 1,755 |
package org.apache.lucene.analysis.tokenattributes;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.Serializable;
import org.apache.lucene.index.Payload;
import org.apache.lucene.util.AttributeImpl;
/**
 * The payload of a Token. See also {@link Payload}.
 */
public class PayloadAttributeImpl extends AttributeImpl implements PayloadAttribute, Cloneable, Serializable {
  // May be null, meaning this token carries no payload.
  private Payload payload;
  /**
   * Initialize this attribute with no payload.
   */
  public PayloadAttributeImpl() {}
  /**
   * Initialize this attribute with the given payload.
   */
  public PayloadAttributeImpl(Payload payload) {
    this.payload = payload;
  }
  /**
   * Returns this Token's payload, or null if it has none.
   */
  public Payload getPayload() {
    return this.payload;
  }
  /**
   * Sets this Token's payload (null clears it).
   */
  public void setPayload(Payload payload) {
    this.payload = payload;
  }
  /** Resets to the no-payload state. */
  @Override
  public void clear() {
    payload = null;
  }
  /** Deep-clones so the copy owns its own Payload instance. */
  @Override
  public Object clone() {
    PayloadAttributeImpl clone = (PayloadAttributeImpl) super.clone();
    if (payload != null) {
      clone.payload = (Payload) payload.clone();
    }
    return clone;
  }
  @Override
  public boolean equals(Object other) {
    if (other == this) {
      return true;
    }
    // Bug fix: the instanceof test used the PayloadAttribute interface while
    // the cast below requires this concrete class, so comparing against a
    // different PayloadAttribute implementation threw ClassCastException
    // instead of returning false.
    if (other instanceof PayloadAttributeImpl) {
      PayloadAttributeImpl o = (PayloadAttributeImpl) other;
      if (o.payload == null || payload == null) {
        // Equal only when both are payload-less.
        return o.payload == null && payload == null;
      }
      return o.payload.equals(payload);
    }
    return false;
  }
  @Override
  public int hashCode() {
    // Null payload hashes to 0, consistent with equals().
    return (payload == null) ? 0 : payload.hashCode();
  }
  /** Copies a clone of the payload into another PayloadAttribute. */
  @Override
  public void copyTo(AttributeImpl target) {
    PayloadAttribute t = (PayloadAttribute) target;
    t.setPayload((payload == null) ? null : (Payload) payload.clone());
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttributeImpl.java | Java | art | 2,633 |
package org.apache.lucene.analysis.tokenattributes;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.Serializable;
import org.apache.lucene.util.AttributeImpl;
/**
 * A Token's lexical type. The Default value is "word".
 */
public class TypeAttributeImpl extends AttributeImpl implements TypeAttribute, Cloneable, Serializable {
  // Lexical type label; may be null if a caller explicitly passes null.
  private String type;
  /** Type used when none is specified and restored by {@link #clear()}. */
  public static final String DEFAULT_TYPE = "word";
  /** Creates an attribute with the default type "word". */
  public TypeAttributeImpl() {
    this(DEFAULT_TYPE);
  }
  /** Creates an attribute with the given initial type (may be null). */
  public TypeAttributeImpl(String type) {
    this.type = type;
  }
  /** Returns this Token's lexical type. Defaults to "word". */
  public String type() {
    return type;
  }
  /** Set the lexical type.
      @see #type() */
  public void setType(String type) {
    this.type = type;
  }
  /** Resets the type back to {@link #DEFAULT_TYPE}. */
  @Override
  public void clear() {
    type = DEFAULT_TYPE;
  }
  @Override
  public boolean equals(Object other) {
    if (other == this) {
      return true;
    }
    if (other instanceof TypeAttributeImpl) {
      final String otherType = ((TypeAttributeImpl) other).type;
      // Robustness fix: the constructor and setType() accept null, but the
      // original dereferenced 'type' unconditionally and threw
      // NullPointerException here. Compare null-safely instead.
      return type == null ? otherType == null : type.equals(otherType);
    }
    return false;
  }
  @Override
  public int hashCode() {
    // Null-safe, consistent with equals(): a null type hashes to 0.
    return (type == null) ? 0 : type.hashCode();
  }
  /** Copies the type into another TypeAttribute. */
  @Override
  public void copyTo(AttributeImpl target) {
    TypeAttribute t = (TypeAttribute) target;
    t.setType(type);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/tokenattributes/TypeAttributeImpl.java | Java | art | 2,061 |
package org.apache.lucene.analysis.tokenattributes;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.util.Attribute;
/** The positionIncrement determines the position of this token
 * relative to the previous Token in a TokenStream, used in phrase
 * searching.
 *
 * <p>The default value is one.
 *
 * <p>Some common uses for this are:<ul>
 *
 * <li>Set it to zero to put multiple terms in the same position. This is
 * useful if, e.g., a word has multiple stems. Searches for phrases
 * including either stem will match. In this case, all but the first stem's
 * increment should be set to zero: the increment of the first instance
 * should be one. Repeating a token with an increment of zero can also be
 * used to boost the scores of matches on that token.
 *
 * <li>Set it to values greater than one to inhibit exact phrase matches.
 * If, for example, one does not want phrases to match across removed stop
 * words, then one could build a stop word filter that removes stop words and
 * also sets the increment to the number of stop words removed before each
 * non-stop word. Then exact phrase queries will only match when the terms
 * occur with no intervening stop words.
 *
 * </ul>
 *
 * @see org.apache.lucene.index.TermPositions
 */
public interface PositionIncrementAttribute extends Attribute {
  /** Set the position increment. The default value is one.
   *
   * @param positionIncrement the distance from the prior term; should not
   *        be negative (implementations may reject negative values)
   */
  public void setPositionIncrement(int positionIncrement);
  /** Returns the position increment of this Token.
   * @see #setPositionIncrement
   */
  public int getPositionIncrement();
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttribute.java | Java | art | 2,423 |
package org.apache.lucene.analysis.tokenattributes;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.util.Attribute;
/**
 * The term text of a Token, exposed as a reusable char[] buffer plus a
 * length so callers can read or rewrite the text without per-token
 * String allocation.
 */
public interface TermAttribute extends Attribute {
  /** Returns the Token's term text.
   *
   * This method has a performance penalty
   * because the text is stored internally in a char[]. If
   * possible, use {@link #termBuffer()} and {@link
   * #termLength()} directly instead. If you really need a
   * String, use this method, which is nothing more than
   * a convenience call to <b>new String(token.termBuffer(), 0, token.termLength())</b>
   */
  public String term();
  /** Copies the contents of buffer, starting at offset for
   * length characters, into the termBuffer array.
   * @param buffer the buffer to copy
   * @param offset the index in the buffer of the first character to copy
   * @param length the number of characters to copy
   */
  public void setTermBuffer(char[] buffer, int offset, int length);
  /** Copies the contents of buffer into the termBuffer array.
   * @param buffer the buffer to copy
   */
  public void setTermBuffer(String buffer);
  /** Copies the contents of buffer, starting at offset and continuing
   * for length characters, into the termBuffer array.
   * @param buffer the buffer to copy
   * @param offset the index in the buffer of the first character to copy
   * @param length the number of characters to copy
   */
  public void setTermBuffer(String buffer, int offset, int length);
  /** Returns the internal termBuffer character array which
   * you can then directly alter. If the array is too
   * small for your token, use {@link
   * #resizeTermBuffer(int)} to increase it. After
   * altering the buffer be sure to call {@link
   * #setTermLength} to record the number of valid
   * characters that were placed into the termBuffer. */
  public char[] termBuffer();
  /** Grows the termBuffer to at least size newSize, preserving the
   * existing content. Note: If the next operation is to change
   * the contents of the term buffer use
   * {@link #setTermBuffer(char[], int, int)},
   * {@link #setTermBuffer(String)}, or
   * {@link #setTermBuffer(String, int, int)}
   * to optimally combine the resize with the setting of the termBuffer.
   * @param newSize minimum size of the new termBuffer
   * @return newly created termBuffer with length &gt;= newSize
   */
  public char[] resizeTermBuffer(int newSize);
  /** Return number of valid characters (length of the term)
   * in the termBuffer array. */
  public int termLength();
  /** Set number of valid characters (length of the term) in
   * the termBuffer array. Use this to truncate the termBuffer
   * or to synchronize with external manipulation of the termBuffer.
   * Note: to grow the size of the array,
   * use {@link #resizeTermBuffer(int)} first.
   * @param length the truncated length
   */
  public void setTermLength(int length);
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/tokenattributes/TermAttribute.java | Java | art | 3,757 |
package org.apache.lucene.analysis.tokenattributes;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.Serializable;
import org.apache.lucene.util.AttributeImpl;
/**
 * Default implementation of {@link FlagsAttribute}: stores the token's flag
 * bits in a plain int, eg so one TokenFilter can pass hints to another one
 * further down the tokenizer chain.
 */
public class FlagsAttributeImpl extends AttributeImpl implements FlagsAttribute, Cloneable, Serializable {
  private int flags = 0;
  /**
   * EXPERIMENTAL: While we think this is here to stay, we may want to change it to be a long.
   * <p/>
   *
   * Get the bitset for any bits that have been set. This is completely distinct from {@link TypeAttribute#type()}, although they do share similar purposes.
   * The flags can be used to encode information about the token for use by other {@link org.apache.lucene.analysis.TokenFilter}s.
   *
   *
   * @return The bits
   */
  public int getFlags() {
    return flags;
  }
  /**
   * Replaces the current flag bits.
   * @see #getFlags()
   */
  public void setFlags(int flags) {
    this.flags = flags;
  }
  /** Resets all flag bits to zero. */
  @Override
  public void clear() {
    flags = 0;
  }
  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    }
    if (!(other instanceof FlagsAttributeImpl)) {
      return false;
    }
    final FlagsAttributeImpl that = (FlagsAttributeImpl) other;
    return that.flags == this.flags;
  }
  /** The flag bits themselves serve as the hash code. */
  @Override
  public int hashCode() {
    return flags;
  }
  /** Copies the flag bits into another FlagsAttribute. */
  @Override
  public void copyTo(AttributeImpl target) {
    ((FlagsAttribute) target).setFlags(flags);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/tokenattributes/FlagsAttributeImpl.java | Java | art | 2,293 |
package org.apache.lucene.analysis.tokenattributes;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.Payload;
import org.apache.lucene.util.Attribute;
/**
 * The payload of a Token. See also {@link Payload}.
 */
public interface PayloadAttribute extends Attribute {
  /**
   * Returns this Token's payload.
   * NOTE(review): presumably null when no payload has been set — confirm
   * against the implementing class.
   */
  public Payload getPayload();

  /**
   * Sets this Token's payload.
   *
   * @param payload the payload to associate with this token
   */
  public void setPayload(Payload payload);
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttribute.java | Java | art | 1,224 |
package org.apache.lucene.analysis.tokenattributes;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.util.Attribute;
/**
 * A Token's lexical type. The Default value is "word".
 */
public interface TypeAttribute extends Attribute {
  /** Returns this Token's lexical type. Defaults to "word". */
  public String type();

  /**
   * Set the lexical type.
   *
   * @param type the new lexical type (e.g. "word")
   * @see #type()
   */
  public void setType(String type);
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/tokenattributes/TypeAttribute.java | Java | art | 1,193 |
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
Porter stemmer in Java. The original paper is in
Porter, 1980, An algorithm for suffix stripping, Program, Vol. 14,
no. 3, pp 130-137,
See also http://www.tartarus.org/~martin/PorterStemmer/index.html
Bug 1 (reported by Gonzalo Parra 16/10/99) fixed as marked below.
The words 'aed', 'eed', 'oed' leave k at 'a' for step 3, and b[k-1]
is then outside the bounds of b.
Similarly,
Bug 2 (reported by Steve Dyrdahl 22/2/00) fixed as marked below.
'ion' by itself leaves j = -1 in the test for 'ion' in step 5, and
b[j] is then outside the bounds of b.
Release 3.
[ This version is derived from Release 3, modified by Brian Goetz to
optimize for fewer object creations. ]
*/
import java.io.*;
/**
 *
 * Stemmer, implementing the Porter Stemming Algorithm
 *
 * The Stemmer class transforms a word into its root form. The input
 * word can be provided a character at time (by calling add()), or at once
 * by calling one of the various stem(something) methods.
 */
class PorterStemmer
{
  private char[] b;              // working buffer holding the word being stemmed
  private int i,                 /* offset into b */
    j, k, k0;                    // cursor positions used by the algorithm steps
  private boolean dirty = false; // true once the buffer differs from the input word
  private static final int INC = 50;  /* unit of size whereby b is increased */
  private static final int EXTRA = 1; // slack kept at the end of b

  public PorterStemmer() {
    b = new char[INC];
    i = 0;
  }

  /**
   * reset() resets the stemmer so it can stem another word.  If you invoke
   * the stemmer by calling add(char) and then stem(), you must call reset()
   * before starting another word.
   */
  public void reset() { i = 0; dirty = false; }

  /**
   * Add a character to the word being stemmed.  When you are finished
   * adding characters, you can call stem(void) to process the word.
   */
  public void add(char ch) {
    if (b.length <= i + EXTRA) {
      // grow the buffer by a fixed increment, preserving existing content
      char[] new_b = new char[b.length+INC];
      System.arraycopy(b, 0, new_b, 0, b.length);
      b = new_b;
    }
    b[i++] = ch;
  }

  /**
   * After a word has been stemmed, it can be retrieved by toString(),
   * or a reference to the internal buffer can be retrieved by getResultBuffer
   * and getResultLength (which is generally more efficient.)
   */
  @Override
  public String toString() { return new String(b,0,i); }

  /**
   * Returns the length of the word resulting from the stemming process.
   */
  public int getResultLength() { return i; }

  /**
   * Returns a reference to a character buffer containing the results of
   * the stemming process.  You also need to consult getResultLength()
   * to determine the length of the result.
   */
  public char[] getResultBuffer() { return b; }

  /* cons(i) is true <=> b[i] is a consonant. */
  private final boolean cons(int i) {
    switch (b[i]) {
    case 'a': case 'e': case 'i': case 'o': case 'u':
      return false;
    case 'y':
      // 'y' is a consonant at the start of the stem, otherwise it is a
      // consonant exactly when the preceding letter is not.
      return (i==k0) ? true : !cons(i-1);
    default:
      return true;
    }
  }

  /* m() measures the number of consonant sequences between k0 and j. if c is
     a consonant sequence and v a vowel sequence, and <..> indicates arbitrary
     presence,
          <c><v>       gives 0
          <c>vc<v>     gives 1
          <c>vcvc<v>   gives 2
          <c>vcvcvc<v> gives 3
          ....
  */
  private final int m() {
    int n = 0;
    int i = k0;
    while(true) {
      if (i > j)
        return n;
      if (! cons(i))
        break;
      i++;
    }
    i++;
    while(true) {
      while(true) {
        if (i > j)
          return n;
        if (cons(i))
          break;
        i++;
      }
      i++;
      n++;
      while(true) {
        if (i > j)
          return n;
        if (! cons(i))
          break;
        i++;
      }
      i++;
    }
  }

  /* vowelinstem() is true <=> k0,...j contains a vowel */
  private final boolean vowelinstem() {
    int i;
    for (i = k0; i <= j; i++)
      if (! cons(i))
        return true;
    return false;
  }

  /* doublec(j) is true <=> j,(j-1) contain a double consonant. */
  private final boolean doublec(int j) {
    if (j < k0+1)
      return false;
    if (b[j] != b[j-1])
      return false;
    return cons(j);
  }

  /* cvc(i) is true <=> i-2,i-1,i has the form consonant - vowel - consonant
     and also if the second c is not w,x or y. this is used when trying to
     restore an e at the end of a short word. e.g.
          cav(e), lov(e), hop(e), crim(e), but
          snow, box, tray.
  */
  private final boolean cvc(int i) {
    if (i < k0+2 || !cons(i) || cons(i-1) || !cons(i-2))
      return false;
    else {
      int ch = b[i];
      if (ch == 'w' || ch == 'x' || ch == 'y') return false;
    }
    return true;
  }

  /* ends(s) is true <=> k0,...k ends with the string s.
     Side effect: sets j to the index just before the suffix. */
  private final boolean ends(String s) {
    int l = s.length();
    int o = k-l+1;
    if (o < k0)
      return false;
    for (int i = 0; i < l; i++)
      if (b[o+i] != s.charAt(i))
        return false;
    j = k-l;
    return true;
  }

  /* setto(s) sets (j+1),...k to the characters in the string s, readjusting
     k. */
  void setto(String s) {
    int l = s.length();
    int o = j+1;
    for (int i = 0; i < l; i++)
      b[o+i] = s.charAt(i);
    k = j+l;
    dirty = true;
  }

  /* r(s) is used further down. */
  void r(String s) { if (m() > 0) setto(s); }

  /* step1() gets rid of plurals and -ed or -ing. e.g.
           caresses  ->  caress
           ponies    ->  poni
           ties      ->  ti
           caress    ->  caress
           cats      ->  cat
           feed      ->  feed
           agreed    ->  agree
           disabled  ->  disable
           matting   ->  mat
           mating    ->  mate
           meeting   ->  meet
           milling   ->  mill
           messing   ->  mess
           meetings  ->  meet
  */
  private final void step1() {
    if (b[k] == 's') {
      if (ends("sses")) k -= 2;
      else if (ends("ies")) setto("i");
      else if (b[k-1] != 's') k--;
    }
    if (ends("eed")) {
      if (m() > 0)
        k--;
    }
    else if ((ends("ed") || ends("ing")) && vowelinstem()) {
      k = j;
      if (ends("at")) setto("ate");
      else if (ends("bl")) setto("ble");
      else if (ends("iz")) setto("ize");
      else if (doublec(k)) {
        int ch = b[k--];
        if (ch == 'l' || ch == 's' || ch == 'z')
          k++;
      }
      else if (m() == 1 && cvc(k))
        setto("e");
    }
  }

  /* step2() turns terminal y to i when there is another vowel in the stem. */
  private final void step2() {
    if (ends("y") && vowelinstem()) {
      b[k] = 'i';
      dirty = true;
    }
  }

  /* step3() maps double suffices to single ones. so -ization ( = -ize plus
     -ation) maps to -ize etc. note that the string before the suffix must give
     m() > 0. */
  private final void step3() {
    if (k == k0) return; /* For Bug 1 */
    switch (b[k-1]) {
    case 'a':
      if (ends("ational")) { r("ate"); break; }
      if (ends("tional")) { r("tion"); break; }
      break;
    case 'c':
      if (ends("enci")) { r("ence"); break; }
      if (ends("anci")) { r("ance"); break; }
      break;
    case 'e':
      if (ends("izer")) { r("ize"); break; }
      break;
    case 'l':
      if (ends("bli")) { r("ble"); break; }
      if (ends("alli")) { r("al"); break; }
      if (ends("entli")) { r("ent"); break; }
      if (ends("eli")) { r("e"); break; }
      if (ends("ousli")) { r("ous"); break; }
      break;
    case 'o':
      if (ends("ization")) { r("ize"); break; }
      if (ends("ation")) { r("ate"); break; }
      if (ends("ator")) { r("ate"); break; }
      break;
    case 's':
      if (ends("alism")) { r("al"); break; }
      if (ends("iveness")) { r("ive"); break; }
      if (ends("fulness")) { r("ful"); break; }
      if (ends("ousness")) { r("ous"); break; }
      break;
    case 't':
      if (ends("aliti")) { r("al"); break; }
      if (ends("iviti")) { r("ive"); break; }
      if (ends("biliti")) { r("ble"); break; }
      break;
    case 'g':
      if (ends("logi")) { r("log"); break; }
    }
  }

  /* step4() deals with -ic-, -full, -ness etc. similar strategy to step3. */
  private final void step4() {
    switch (b[k]) {
    case 'e':
      if (ends("icate")) { r("ic"); break; }
      if (ends("ative")) { r(""); break; }
      if (ends("alize")) { r("al"); break; }
      break;
    case 'i':
      if (ends("iciti")) { r("ic"); break; }
      break;
    case 'l':
      if (ends("ical")) { r("ic"); break; }
      if (ends("ful")) { r(""); break; }
      break;
    case 's':
      if (ends("ness")) { r(""); break; }
      break;
    }
  }

  /* step5() takes off -ant, -ence etc., in context <c>vcvc<v>. */
  private final void step5() {
    if (k == k0) return; /* for Bug 1 */
    switch (b[k-1]) {
    case 'a':
      if (ends("al")) break;
      return;
    case 'c':
      if (ends("ance")) break;
      if (ends("ence")) break;
      return;
    case 'e':
      if (ends("er")) break;
      return;
    case 'i':
      if (ends("ic")) break;
      return;
    case 'l':
      if (ends("able")) break;
      if (ends("ible")) break;
      return;
    case 'n':
      if (ends("ant")) break;
      if (ends("ement")) break;
      if (ends("ment")) break;
      /* element etc. not stripped before the m */
      if (ends("ent")) break;
      return;
    case 'o':
      if (ends("ion") && j >= 0 && (b[j] == 's' || b[j] == 't')) break;
      /* j >= 0 fixes Bug 2 */
      if (ends("ou")) break;
      return;
      /* takes care of -ous */
    case 's':
      if (ends("ism")) break;
      return;
    case 't':
      if (ends("ate")) break;
      if (ends("iti")) break;
      return;
    case 'u':
      if (ends("ous")) break;
      return;
    case 'v':
      if (ends("ive")) break;
      return;
    case 'z':
      if (ends("ize")) break;
      return;
    default:
      return;
    }
    if (m() > 1)
      k = j;
  }

  /* step6() removes a final -e if m() > 1. */
  private final void step6() {
    j = k;
    if (b[k] == 'e') {
      int a = m();
      if (a > 1 || a == 1 && !cvc(k-1))
        k--;
    }
    if (b[k] == 'l' && doublec(k) && m() > 1)
      k--;
  }

  /**
   * Stem a word provided as a String.  Returns the result as a String.
   * Returns the input unchanged when stemming did not alter the word.
   */
  public String stem(String s) {
    if (stem(s.toCharArray(), s.length()))
      return toString();
    else
      return s;
  }

  /** Stem a word contained in a char[].  Returns true if the stemming process
   * resulted in a word different from the input.  You can retrieve the
   * result with getResultLength()/getResultBuffer() or toString().
   */
  public boolean stem(char[] word) {
    return stem(word, word.length);
  }

  /** Stem a word contained in a portion of a char[] array.  Returns
   * true if the stemming process resulted in a word different from
   * the input.  You can retrieve the result with
   * getResultLength()/getResultBuffer() or toString().
   */
  public boolean stem(char[] wordBuffer, int offset, int wordLen) {
    reset();
    if (b.length < wordLen) {
      // no copy needed: the buffer content is fully overwritten below
      char[] new_b = new char[wordLen + EXTRA];
      b = new_b;
    }
    System.arraycopy(wordBuffer, offset, b, 0, wordLen);
    i = wordLen;
    return stem(0);
  }

  /** Stem a word contained in a leading portion of a char[] array.
   * Returns true if the stemming process resulted in a word different
   * from the input.  You can retrieve the result with
   * getResultLength()/getResultBuffer() or toString().
   */
  public boolean stem(char[] word, int wordLen) {
    return stem(word, 0, wordLen);
  }

  /** Stem the word placed into the Stemmer buffer through calls to add().
   * Returns true if the stemming process resulted in a word different
   * from the input.  You can retrieve the result with
   * getResultLength()/getResultBuffer() or toString().
   */
  public boolean stem() {
    return stem(0);
  }

  /** Run the stemming steps over b[i0..i-1]; words of length <= 2 are
   * left untouched. Returns true if the buffer was modified. */
  public boolean stem(int i0) {
    k = i - 1;
    k0 = i0;
    if (k > k0+1) {
      step1(); step2(); step3(); step4(); step5(); step6();
    }
    // Also, a word is considered dirty if we lopped off letters
    // Thanks to Ifigenia Vairelles for pointing this out.
    if (i != k+1)
      dirty = true;
    i = k+1;
    return dirty;
  }

  /** Test program for demonstrating the Stemmer.  It reads a file and
   * stems each word, writing the result to standard out.
   * Usage: Stemmer file-name
   */
  public static void main(String[] args) {
    PorterStemmer s = new PorterStemmer();
    for (int i = 0; i < args.length; i++) {
      InputStream in = null;
      try {
        in = new FileInputStream(args[i]);
        byte[] buffer = new byte[1024];
        int bufferLen, offset, ch;
        bufferLen = in.read(buffer);
        offset = 0;
        s.reset();
        while(true) {
          if (offset < bufferLen)
            ch = buffer[offset++];
          else {
            bufferLen = in.read(buffer);
            offset = 0;
            if (bufferLen < 0)
              ch = -1;
            else
              ch = buffer[offset++];
          }
          if (Character.isLetter((char) ch)) {
            s.add(Character.toLowerCase((char) ch));
          }
          else {
            s.stem();
            System.out.print(s.toString());
            s.reset();
            if (ch < 0)
              break;
            else {
              System.out.print((char) ch);
            }
          }
        }
      }
      catch (IOException e) {
        System.out.println("error reading " + args[i]);
      }
      finally {
        // Fix: previously the stream was closed only on the success path,
        // leaking the file descriptor whenever an IOException occurred.
        if (in != null) {
          try {
            in.close();
          } catch (IOException e) {
            // best effort: nothing sensible to do if close fails
          }
        }
      }
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/PorterStemmer.java | Java | art | 14,255 |
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.util.AttributeSource;
import java.io.Reader;
import java.io.IOException;
/** A Tokenizer is a TokenStream whose input is a Reader.
  <p>
  This is an abstract class; subclasses must override {@link #incrementToken()}
  <p>
  NOTE: Subclasses overriding {@link #incrementToken()} must
  call {@link AttributeSource#clearAttributes()} before
  setting attributes.
 */
public abstract class Tokenizer extends TokenStream {
  /** The text source for this Tokenizer. */
  protected Reader input;

  /** Construct a tokenizer with null input. */
  protected Tokenizer() {}

  /** Construct a token stream processing the given input. */
  protected Tokenizer(Reader input) {
    this.input = CharReader.get(input);
  }

  /** Construct a tokenizer with null input using the given AttributeFactory. */
  protected Tokenizer(AttributeFactory factory) {
    super(factory);
  }

  /** Construct a token stream processing the given input using the given AttributeFactory. */
  protected Tokenizer(AttributeFactory factory, Reader input) {
    super(factory);
    this.input = CharReader.get(input);
  }

  /** Construct a token stream processing the given input using the given AttributeSource. */
  protected Tokenizer(AttributeSource source) {
    super(source);
  }

  /** Construct a token stream processing the given input using the given AttributeSource. */
  protected Tokenizer(AttributeSource source, Reader input) {
    super(source);
    this.input = CharReader.get(input);
  }

  /** By default, closes the input Reader. */
  @Override
  public void close() throws IOException {
    if (input == null) {
      return;
    }
    input.close();
    // LUCENE-2387: don't hold onto Reader after close, so GC can reclaim
    input = null;
  }

  /** Return the corrected offset. If {@link #input} is a {@link CharStream} subclass
   * this method calls {@link CharStream#correctOffset}, else returns <code>currentOff</code>.
   * @param currentOff offset as seen in the output
   * @return corrected offset based on the input
   * @see CharStream#correctOffset
   */
  protected final int correctOffset(int currentOff) {
    if (input instanceof CharStream) {
      return ((CharStream) input).correctOffset(currentOff);
    }
    return currentOff;
  }

  /** Expert: Reset the tokenizer to a new reader.  Typically, an
   * analyzer (in its reusableTokenStream method) will use
   * this to re-use a previously created tokenizer.
   * NOTE(review): unlike the constructors, the reader is not wrapped via
   * CharReader.get here — confirm callers pass a CharStream when offset
   * correction is required. */
  public void reset(Reader input) throws IOException {
    this.input = input;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/Tokenizer.java | Java | art | 3,372 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.analysis;
import java.util.ArrayList;
import java.util.List;
/**
 * Base utility class for implementing a {@link CharFilter}.
 * You subclass this, and then record mappings by calling
 * {@link #addOffCorrectMap}, and then invoke the correct
 * method to correct an offset.
 *
 * <p><b>NOTE</b>: This class is not particularly efficient.
 * For example, a new class instance is created for every
 * call to {@link #addOffCorrectMap}, which is then appended
 * to a private list.
 */
public abstract class BaseCharFilter extends CharFilter {

  // Offset corrections recorded so far, in insertion order; lazily created.
  private List<OffCorrectMap> pcmList;

  public BaseCharFilter(CharStream in) {
    super(in);
  }

  /** Retrieve the corrected offset.  Note that this method
   *  is slow, if you correct positions far before the most
   *  recently added position, as it's a simple linear
   *  search backwards through all offset corrections added
   *  by {@link #addOffCorrectMap}. */
  @Override
  protected int correct(int currentOff) {
    if (pcmList == null || pcmList.isEmpty()) {
      return currentOff;
    }
    // Walk backwards to find the most recent correction at or before currentOff.
    int idx = pcmList.size();
    while (--idx >= 0) {
      OffCorrectMap entry = pcmList.get(idx);
      if (currentOff >= entry.off) {
        return currentOff + entry.cumulativeDiff;
      }
    }
    return currentOff;
  }

  /** Returns the cumulative diff of the most recently added correction,
   *  or 0 when no corrections have been recorded. */
  protected int getLastCumulativeDiff() {
    if (pcmList == null || pcmList.isEmpty()) {
      return 0;
    }
    return pcmList.get(pcmList.size() - 1).cumulativeDiff;
  }

  /** Records that output offsets at or past {@code off} must be shifted by
   *  {@code cumulativeDiff} to recover input offsets. */
  protected void addOffCorrectMap(int off, int cumulativeDiff) {
    if (pcmList == null) {
      pcmList = new ArrayList<OffCorrectMap>();
    }
    pcmList.add(new OffCorrectMap(off, cumulativeDiff));
  }

  /** A single (offset, cumulative diff) correction entry. */
  static class OffCorrectMap {
    int off;
    int cumulativeDiff;

    OffCorrectMap(int off, int cumulativeDiff) {
      this.off = off;
      this.cumulativeDiff = cumulativeDiff;
    }

    @Override
    public String toString() {
      return "(" + off + "," + cumulativeDiff + ")";
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/BaseCharFilter.java | Java | art | 2,893 |
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import org.apache.lucene.util.AttributeSource;
/**
 * This class can be used if the token attributes of a TokenStream
 * are intended to be consumed more than once. It caches
 * all token attribute states locally in a List.
 *
 * <P>CachingTokenFilter implements the optional method
 * {@link TokenStream#reset()}, which repositions the
 * stream to the first Token.
 */
public final class CachingTokenFilter extends TokenFilter {
  // Lazily filled list of captured attribute states, one per token.
  private List<AttributeSource.State> cache = null;
  // Cursor over the cache; recreated by reset().
  private Iterator<AttributeSource.State> iterator = null;
  // State captured after the wrapped stream's end(), replayed by end().
  private AttributeSource.State finalState;

  public CachingTokenFilter(TokenStream input) {
    super(input);
  }

  @Override
  public final boolean incrementToken() throws IOException {
    if (cache == null) {
      // First call: eagerly consume the wrapped stream and cache every state.
      cache = new LinkedList<AttributeSource.State>();
      fillCache();
      iterator = cache.iterator();
    }
    if (iterator.hasNext()) {
      // Since the TokenFilter can be reset, the tokens need to be preserved as immutable.
      restoreState(iterator.next());
      return true;
    }
    // the cache is exhausted
    return false;
  }

  @Override
  public final void end() throws IOException {
    if (finalState != null) {
      restoreState(finalState);
    }
  }

  @Override
  public void reset() throws IOException {
    if (cache != null) {
      iterator = cache.iterator();
    }
  }

  /** Consumes the wrapped stream, caching one state per token plus the final state. */
  private void fillCache() throws IOException {
    while (input.incrementToken()) {
      cache.add(captureState());
    }
    // capture final state
    input.end();
    finalState = captureState();
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/CachingTokenFilter.java | Java | art | 2,578 |
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.Reader;
import org.apache.lucene.util.AttributeSource;
/** A LetterTokenizer is a tokenizer that divides text at non-letters. That's
  to say, it defines tokens as maximal strings of adjacent letters, as defined
  by java.lang.Character.isLetter() predicate.
  Note: this does a decent job for most European languages, but does a terrible
  job for some Asian languages, where words are not separated by spaces. */
public class LetterTokenizer extends CharTokenizer {
  /** Construct a new LetterTokenizer.
   * @param in the reader supplying the text to tokenize */
  public LetterTokenizer(Reader in) {
    super(in);
  }

  /** Construct a new LetterTokenizer using a given {@link AttributeSource}. */
  public LetterTokenizer(AttributeSource source, Reader in) {
    super(source, in);
  }

  /** Construct a new LetterTokenizer using a given {@link org.apache.lucene.util.AttributeSource.AttributeFactory}. */
  public LetterTokenizer(AttributeFactory factory, Reader in) {
    super(factory, in);
  }

  /** Collects only characters which satisfy
   * {@link Character#isLetter(char)}.
   * @return true if {@code c} is part of a token, false if it is a separator */
  @Override
  protected boolean isTokenChar(char c) {
    return Character.isLetter(c);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/LetterTokenizer.java | Java | art | 1,996 |
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.Reader;
import org.apache.lucene.util.AttributeSource;
/** A WhitespaceTokenizer is a tokenizer that divides text at whitespace.
 * Adjacent sequences of non-Whitespace characters form tokens. */
public class WhitespaceTokenizer extends CharTokenizer {
  /** Construct a new WhitespaceTokenizer.
   * @param in the reader supplying the text to tokenize */
  public WhitespaceTokenizer(Reader in) {
    super(in);
  }

  /** Construct a new WhitespaceTokenizer using a given {@link AttributeSource}. */
  public WhitespaceTokenizer(AttributeSource source, Reader in) {
    super(source, in);
  }

  /** Construct a new WhitespaceTokenizer using a given {@link org.apache.lucene.util.AttributeSource.AttributeFactory}. */
  public WhitespaceTokenizer(AttributeFactory factory, Reader in) {
    super(factory, in);
  }

  /** Collects only characters which do not satisfy
   * {@link Character#isWhitespace(char)}.
   * @return true if {@code c} is part of a token, false if it is whitespace */
  @Override
  protected boolean isTokenChar(char c) {
    return !Character.isWhitespace(c);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/WhitespaceTokenizer.java | Java | art | 1,817 |
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.util.HashMap;
import java.util.HashSet;
/**
 * Loader for text files that represent a list of stopwords.
 */
public class WordlistLoader {

  /**
   * Loads a text file and adds every line as an entry to a HashSet (omitting
   * leading and trailing whitespace). Every line of the file should contain only
   * one word. The words need to be in lowercase if you make use of an
   * Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
   * <p>
   * NOTE(review): the file is read with the platform default charset
   * (via FileReader) — confirm wordlist files are encoded accordingly.
   *
   * @param wordfile File containing the wordlist
   * @return A HashSet with the file's words
   */
  public static HashSet<String> getWordSet(File wordfile) throws IOException {
    HashSet<String> result = new HashSet<String>();
    FileReader reader = null;
    try {
      reader = new FileReader(wordfile);
      result = getWordSet(reader);
    }
    finally {
      if (reader != null)
        reader.close();
    }
    return result;
  }

  /**
   * Loads a text file and adds every non-comment line as an entry to a HashSet (omitting
   * leading and trailing whitespace). Every line of the file should contain only
   * one word. The words need to be in lowercase if you make use of an
   * Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
   *
   * @param wordfile File containing the wordlist
   * @param comment The comment string to ignore
   * @return A HashSet with the file's words
   */
  public static HashSet<String> getWordSet(File wordfile, String comment) throws IOException {
    HashSet<String> result = new HashSet<String>();
    FileReader reader = null;
    try {
      reader = new FileReader(wordfile);
      result = getWordSet(reader, comment);
    }
    finally {
      if (reader != null)
        reader.close();
    }
    return result;
  }

  /**
   * Reads lines from a Reader and adds every line as an entry to a HashSet (omitting
   * leading and trailing whitespace). Every line of the Reader should contain only
   * one word. The words need to be in lowercase if you make use of an
   * Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
   * <p>
   * Note: the given reader is closed before this method returns, even if it
   * was supplied by the caller.
   *
   * @param reader Reader containing the wordlist
   * @return A HashSet with the reader's words
   */
  public static HashSet<String> getWordSet(Reader reader) throws IOException {
    HashSet<String> result = new HashSet<String>();
    BufferedReader br = null;
    try {
      br = toBufferedReader(reader);
      String word = null;
      while ((word = br.readLine()) != null) {
        result.add(word.trim());
      }
    }
    finally {
      if (br != null)
        br.close();
    }
    return result;
  }

  /**
   * Reads lines from a Reader and adds every non-comment line as an entry to a HashSet (omitting
   * leading and trailing whitespace). Every line of the Reader should contain only
   * one word. The words need to be in lowercase if you make use of an
   * Analyzer which uses LowerCaseFilter (like StandardAnalyzer).
   * <p>
   * Note: the given reader is closed before this method returns, even if it
   * was supplied by the caller.
   *
   * @param reader Reader containing the wordlist
   * @param comment The string representing a comment.
   * @return A HashSet with the reader's words
   */
  public static HashSet<String> getWordSet(Reader reader, String comment) throws IOException {
    HashSet<String> result = new HashSet<String>();
    BufferedReader br = null;
    try {
      br = toBufferedReader(reader);
      String word = null;
      while ((word = br.readLine()) != null) {
        // comparison against "false" replaced with idiomatic negation
        if (!word.startsWith(comment)) {
          result.add(word.trim());
        }
      }
    }
    finally {
      if (br != null)
        br.close();
    }
    return result;
  }

  /** Wraps the given reader in a BufferedReader unless it already is one. */
  private static BufferedReader toBufferedReader(Reader reader) {
    return (reader instanceof BufferedReader)
        ? (BufferedReader) reader
        : new BufferedReader(reader);
  }

  /**
   * Reads a stem dictionary. Each line contains:
   * <pre>word<b>\t</b>stem</pre>
   * (i.e. two tab separated words)
   * <p>
   * NOTE(review): a line without a tab character makes this throw
   * ArrayIndexOutOfBoundsException — confirm input files are well-formed.
   *
   * @return stem dictionary that overrules the stemming algorithm
   * @throws IOException
   */
  public static HashMap<String, String> getStemDict(File wordstemfile) throws IOException {
    if (wordstemfile == null)
      throw new NullPointerException("wordstemfile may not be null");
    HashMap<String, String> result = new HashMap<String, String>();
    BufferedReader br = null;
    FileReader fr = null;
    try {
      fr = new FileReader(wordstemfile);
      br = new BufferedReader(fr);
      String line;
      while ((line = br.readLine()) != null) {
        String[] wordstem = line.split("\t", 2);
        result.put(wordstem[0], wordstem[1]);
      }
    } finally {
      // Closing the BufferedReader also closes the wrapped FileReader;
      // fr is closed directly only if the BufferedReader was never created.
      if (br != null)
        br.close();
      else if (fr != null)
        fr.close();
    }
    return result;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/WordlistLoader.java | Java | art | 5,693 |
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.Reader;
import java.io.IOException;
import java.util.Map;
import java.util.HashMap;
/**
* This analyzer is used to facilitate scenarios where different
* fields require different analysis techniques. Use {@link #addAnalyzer}
* to add a non-default analyzer on a field name basis.
*
* <p>Example usage:
*
* <pre>
* PerFieldAnalyzerWrapper aWrapper =
* new PerFieldAnalyzerWrapper(new StandardAnalyzer());
* aWrapper.addAnalyzer("firstname", new KeywordAnalyzer());
* aWrapper.addAnalyzer("lastname", new KeywordAnalyzer());
* </pre>
*
* <p>In this example, StandardAnalyzer will be used for all fields except "firstname"
* and "lastname", for which KeywordAnalyzer will be used.
*
* <p>A PerFieldAnalyzerWrapper can be used like any other analyzer, for both indexing
* and query parsing.
*/
public class PerFieldAnalyzerWrapper extends Analyzer {
  // Fallback analyzer used for every field that has no explicit mapping.
  private final Analyzer defaultAnalyzer;
  // Per-field overrides: field name -> analyzer to use for that field.
  private final Map<String,Analyzer> analyzerMap = new HashMap<String,Analyzer>();
  /**
   * Constructs with default analyzer.
   *
   * @param defaultAnalyzer Any fields not specifically
   * defined to use a different analyzer will use the one provided here.
   */
  public PerFieldAnalyzerWrapper(Analyzer defaultAnalyzer) {
    this(defaultAnalyzer, null);
  }
  /**
   * Constructs with default analyzer and a map of analyzers to use for
   * specific fields.
   *
   * @param defaultAnalyzer Any fields not specifically
   * defined to use a different analyzer will use the one provided here.
   * @param fieldAnalyzers a Map (String field name to the Analyzer) to be
   * used for those fields
   */
  public PerFieldAnalyzerWrapper(Analyzer defaultAnalyzer,
      Map<String,Analyzer> fieldAnalyzers) {
    this.defaultAnalyzer = defaultAnalyzer;
    if (fieldAnalyzers != null) {
      analyzerMap.putAll(fieldAnalyzers);
    }
    setOverridesTokenStreamMethod(PerFieldAnalyzerWrapper.class);
  }
  /**
   * Defines an analyzer to use for the specified field.
   *
   * @param fieldName field name requiring a non-default analyzer
   * @param analyzer non-default analyzer to use for field
   */
  public void addAnalyzer(String fieldName, Analyzer analyzer) {
    analyzerMap.put(fieldName, analyzer);
  }
  /** Returns the analyzer mapped to fieldName, or the default when none is mapped. */
  private Analyzer getAnalyzer(String fieldName) {
    Analyzer analyzer = analyzerMap.get(fieldName);
    return analyzer != null ? analyzer : defaultAnalyzer;
  }
  @Override
  public TokenStream tokenStream(String fieldName, Reader reader) {
    return getAnalyzer(fieldName).tokenStream(fieldName, reader);
  }
  @Override
  public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException {
    if (overridesTokenStreamMethod) {
      // LUCENE-1678: force fallback to tokenStream() if we
      // have been subclassed and that subclass overrides
      // tokenStream but not reusableTokenStream
      return tokenStream(fieldName, reader);
    }
    return getAnalyzer(fieldName).reusableTokenStream(fieldName, reader);
  }
  /** Return the positionIncrementGap from the analyzer assigned to fieldName */
  @Override
  public int getPositionIncrementGap(String fieldName) {
    return getAnalyzer(fieldName).getPositionIncrementGap(fieldName);
  }
  @Override
  public String toString() {
    return "PerFieldAnalyzerWrapper(" + analyzerMap + ", default=" + defaultAnalyzer + ")";
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/PerFieldAnalyzerWrapper.java | Java | art | 4,367 |
package org.apache.lucene.analysis;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.Reader;
import org.apache.lucene.util.AttributeSource;
/**
* LowerCaseTokenizer performs the function of LetterTokenizer
* and LowerCaseFilter together. It divides text at non-letters and converts
* them to lower case. While it is functionally equivalent to the combination
* of LetterTokenizer and LowerCaseFilter, there is a performance advantage
* to doing the two tasks at once, hence this (redundant) implementation.
* <P>
* Note: this does a decent job for most European languages, but does a terrible
* job for some Asian languages, where words are not separated by spaces.
*/
public final class LowerCaseTokenizer extends LetterTokenizer {
  /** Construct a new LowerCaseTokenizer.
   * @param in the Reader supplying the characters to tokenize */
  public LowerCaseTokenizer(Reader in) {
    super(in);
  }
  /** Construct a new LowerCaseTokenizer using a given {@link AttributeSource}.
   * @param source the attribute source to reuse for this tokenizer
   * @param in the Reader supplying the characters to tokenize */
  public LowerCaseTokenizer(AttributeSource source, Reader in) {
    super(source, in);
  }
  /** Construct a new LowerCaseTokenizer using a given {@link org.apache.lucene.util.AttributeSource.AttributeFactory}.
   * @param factory the attribute factory to use for this tokenizer
   * @param in the Reader supplying the characters to tokenize */
  public LowerCaseTokenizer(AttributeFactory factory, Reader in) {
    super(factory, in);
  }
  /** Converts char to lower case
   * {@link Character#toLowerCase(char)}.*/
  @Override
  protected char normalize(char c) {
    return Character.toLowerCase(c);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/analysis/LowerCaseTokenizer.java | Java | art | 2,190 |
package org.apache.lucene.messages;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.Locale;
/**
* Default implementation of Message interface.
* For Native Language Support (NLS), system of software internationalization.
*/
public class MessageImpl implements Message {
  private static final long serialVersionUID = -3077643314630884523L;
  // Resource-bundle key identifying the message text.
  private final String key;
  // Format arguments substituted into the localized message; never null.
  private final Object[] arguments;
  /**
   * Creates a message with no format arguments.
   *
   * @param key resource-bundle key for the message
   */
  public MessageImpl(String key) {
    this.key = key;
    this.arguments = new Object[0];
  }
  /**
   * Creates a message with format arguments.
   *
   * @param key resource-bundle key for the message
   * @param args format arguments; the array is stored as-is (not copied)
   */
  public MessageImpl(String key, Object... args) {
    this.key = key;
    this.arguments = args;
  }
  public Object[] getArguments() {
    // NOTE: returns the internal array without copying, so callers can
    // observe mutations; kept for backward compatibility.
    return this.arguments;
  }
  public String getKey() {
    return this.key;
  }
  public String getLocalizedMessage() {
    return getLocalizedMessage(Locale.getDefault());
  }
  public String getLocalizedMessage(Locale locale) {
    return NLS.getLocalizedMessage(getKey(), locale, getArguments());
  }
  @Override
  public String toString() {
    // Renders as "key arg0, arg1, ..." (or just the key when no arguments).
    Object[] args = getArguments();
    StringBuilder sb = new StringBuilder(getKey());
    if (args != null) {
      for (int i = 0; i < args.length; i++) {
        sb.append(i == 0 ? " " : ", ").append(args[i]);
      }
    }
    return sb.toString();
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/messages/MessageImpl.java | Java | art | 1,997 |
<!doctype html public "-//w3c//dtd html 4.0 transitional//en">
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
</head>
<body>
For Native Language Support (NLS), system of software internationalization.
<h2>NLS message API</h2>
<p>
This utility API adds support for NLS messages in the Apache code.
It is currently used by the Lucene "New Flexible Query Parser".
</p>
<p>
Features:
<ol>
<li>Message reference in the code, using static Strings</li>
<li>Message resource validation at class load time, for easier debugging</li>
<li>Allows for message IDs to be re-factored using eclipse or other code re-factor tools</li>
<li>Allows for reference count on messages, just like code</li>
<li>Lazy loading of Message Strings</li>
<li>Normal loading Message Strings</li>
</ol>
</p>
<br/>
<br/>
<p>
Lazy loading of Message Strings
<pre>
public class MessagesTestBundle extends NLS {
private static final String BUNDLE_NAME = MessagesTestBundle.class.getName();
private MessagesTestBundle() {
// should never be instantiated
}
static {
// register all string ids with NLS class and initialize static string
// values
NLS.initializeMessages(BUNDLE_NAME, MessagesTestBundle.class);
}
// static string must match the strings in the property files.
public static String Q0001E_INVALID_SYNTAX;
public static String Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION;
// this message is missing from the properties file
public static String Q0005E_MESSAGE_NOT_IN_BUNDLE;
}
// Create a message reference
Message invalidSyntax = new MessageImpl(MessagesTestBundle.Q0001E_INVALID_SYNTAX, "XXX");
// Do other stuff in the code...
// when is time to display the message to the user or log the message on a file
// the message is loaded from the correct bundle
String message1 = invalidSyntax.getLocalizedMessage();
String message2 = invalidSyntax.getLocalizedMessage(Locale.JAPANESE);
</pre>
</p>
<br/>
<br/>
<p>
Normal loading of Message Strings
<pre>
String message1 = NLS.getLocalizedMessage(MessagesTestBundle.Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION);
String message2 = NLS.getLocalizedMessage(MessagesTestBundle.Q0004E_INVALID_SYNTAX_ESCAPE_UNICODE_TRUNCATION, Locale.JAPANESE);
</pre>
</p>
<p>
The org.apache.lucene.messages.TestNLS JUnit test contains several other examples.
The TestNLS java code is available from the Apache Lucene code repository.
</p>
</body>
</html>
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/messages/package.html | HTML | art | 3,364 |
package org.apache.lucene.messages;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.text.MessageFormat;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Locale;
import java.util.Map;
import java.util.MissingResourceException;
import java.util.ResourceBundle;
/**
* MessageBundles classes extend this class, to implement a bundle.
*
* For Native Language Support (NLS), system of software internationalization.
*
 * This class is similar to eclipse.osgi.util.NLS:
 * - initializeMessages() resets the values of all static strings and should
 *   only be called by classes that extend NLS (see TestMessages.java for
 *   reference)
 * - performs validation of all messages in a bundle at class load time
 * - performs per-message validation at runtime - see NLSTest.java for
 *   usage reference
*
* MessageBundle classes may subclass this type.
*/
public class NLS {
  // Registry of message-bundle classes, keyed by bundle name. Populated by
  // initializeMessages() from subclasses' static initializers.
  private static Map<String, Class<Object>> bundles =
      new HashMap<String, Class<Object>>(0);
  protected NLS() {
    // Do not instantiate
  }
  /** Returns the message for {@code key} in the default locale. */
  public static String getLocalizedMessage(String key) {
    return getLocalizedMessage(key, Locale.getDefault());
  }
  /**
   * Returns the message for {@code key} in the given locale; when the key
   * cannot be resolved in any registered bundle, a descriptive placeholder
   * string is returned instead of throwing.
   */
  public static String getLocalizedMessage(String key, Locale locale) {
    Object message = getResourceBundleObject(key, locale);
    if (message == null) {
      return "Message with key:" + key + " and locale: " + locale
          + " not found.";
    }
    return message.toString();
  }
  /**
   * Returns the message for {@code key} in the given locale, formatted with
   * {@link MessageFormat} when arguments are supplied.
   */
  public static String getLocalizedMessage(String key, Locale locale,
      Object... args) {
    String str = getLocalizedMessage(key, locale);
    if (args.length > 0) {
      str = MessageFormat.format(str, args);
    }
    return str;
  }
  /** Returns the formatted message for {@code key} in the default locale. */
  public static String getLocalizedMessage(String key, Object... args) {
    return getLocalizedMessage(key, Locale.getDefault(), args);
  }
  /**
   * Initialize a given class with the message bundle Keys Should be called from
   * a class that extends NLS in a static block at class load time.
   *
   * @param bundleName
   *          Property file with that contains the message bundle
   * @param clazz
   *          where constants will reside
   */
  @SuppressWarnings("unchecked")
  protected static void initializeMessages(String bundleName, Class clazz) {
    try {
      load(clazz);
      if (!bundles.containsKey(bundleName))
        bundles.put(bundleName, clazz);
    } catch (Throwable e) {
      // ignore all errors and exceptions
      // because this function is supposed to be called at class load time.
    }
  }
  // Scans every registered bundle for messageKey; returns null when no
  // bundle contains it.
  private static Object getResourceBundleObject(String messageKey, Locale locale) {
    // slow resource checking
    // need to loop thru all registered resource bundles
    for (Class<Object> clazz : bundles.values()) {
      ResourceBundle resourceBundle = ResourceBundle.getBundle(clazz.getName(),
          locale);
      if (resourceBundle != null) {
        try {
          Object obj = resourceBundle.getObject(messageKey);
          if (obj != null)
            return obj;
        } catch (MissingResourceException e) {
          // just continue it might be on the next resource bundle
        }
      }
    }
    // if resource is not found
    return null;
  }
  /**
   * Assigns each eligible static String field of {@code clazz} its own name
   * and validates that the corresponding message exists in the bundle.
   * (A previous revision also built an unused name-to-Field map here; that
   * dead work has been removed.)
   *
   * @param clazz the message-bundle class to initialize
   */
  private static void load(Class<Object> clazz) {
    final Field[] fieldArray = clazz.getDeclaredFields();
    boolean isFieldAccessible = (clazz.getModifiers() & Modifier.PUBLIC) != 0;
    for (Field field : fieldArray) {
      loadfieldValue(field, isFieldAccessible, clazz);
    }
  }
  /**
   * Sets a public static non-final field to its own name and validates the
   * matching bundle entry; other fields are ignored.
   *
   * @param field the candidate field
   * @param isFieldAccessible whether the declaring class is public
   */
  private static void loadfieldValue(Field field, boolean isFieldAccessible,
      Class<Object> clazz) {
    int MOD_EXPECTED = Modifier.PUBLIC | Modifier.STATIC;
    int MOD_MASK = MOD_EXPECTED | Modifier.FINAL;
    if ((field.getModifiers() & MOD_MASK) != MOD_EXPECTED)
      return;
    // Set a value for this empty field.
    if (!isFieldAccessible)
      makeAccessible(field);
    try {
      field.set(null, field.getName());
      validateMessage(field.getName(), clazz);
    } catch (IllegalArgumentException e) {
      // should not happen
    } catch (IllegalAccessException e) {
      // should not happen
    }
  }
  /**
   * Warns (on stderr) when the bundle for {@code clazz} has no entry for key.
   *
   * @param key Message Key
   */
  private static void validateMessage(String key, Class<Object> clazz) {
    // Test if the message is present in the resource bundle
    try {
      ResourceBundle resourceBundle = ResourceBundle.getBundle(clazz.getName(),
          Locale.getDefault());
      if (resourceBundle != null) {
        Object obj = resourceBundle.getObject(key);
        if (obj == null)
          System.err.println("WARN: Message with key:" + key + " and locale: "
              + Locale.getDefault() + " not found.");
      }
    } catch (MissingResourceException e) {
      System.err.println("WARN: Message with key:" + key + " and locale: "
          + Locale.getDefault() + " not found.");
    } catch (Throwable e) {
      // ignore all other errors and exceptions
      // since this code is just a test to see if the message is present on the
      // system
    }
  }
  /*
   * Make a class field accessible
   */
  @SuppressWarnings("unchecked")
  private static void makeAccessible(final Field field) {
    if (System.getSecurityManager() == null) {
      field.setAccessible(true);
    } else {
      AccessController.doPrivileged(new PrivilegedAction() {
        public Object run() {
          field.setAccessible(true);
          return null;
        }
      });
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/messages/NLS.java | Java | art | 6,808 |
package org.apache.lucene.messages;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.Serializable;
import java.util.Locale;
/**
* Message Interface for a lazy loading.
* For Native Language Support (NLS), system of software internationalization.
*/
public interface Message extends Serializable {
  /** Returns the resource-bundle key identifying this message. */
  public String getKey();
  /** Returns the format arguments associated with this message. */
  public Object[] getArguments();
  /** Returns the message text localized for the default locale. */
  public String getLocalizedMessage();
  /** Returns the message text localized for the given locale. */
  public String getLocalizedMessage(Locale locale);
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/messages/Message.java | Java | art | 1,230 |
package org.apache.lucene.messages;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Interface that exceptions should implement to support lazy loading of messages.
*
* For Native Language Support (NLS), system of software internationalization.
*
* This Interface should be implemented by all exceptions that require
* translation
*
*/
public interface NLSException {
  /**
   * @return an instance of a class that implements the Message interface,
   *         carrying the key and arguments used for lazy localization
   */
  public Message getMessageObject();
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/messages/NLSException.java | Java | art | 1,265 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
/** Base implementation class for buffered {@link IndexInput}. */
public abstract class BufferedIndexInput extends IndexInput {
  /** Default buffer size */
  public static final int BUFFER_SIZE = 1024;
  // Current buffer capacity; may be changed via setBufferSize().
  private int bufferSize = BUFFER_SIZE;
  // Lazily allocated on first refill(); null until then.
  protected byte[] buffer;
  private long bufferStart = 0; // position in file of buffer
  private int bufferLength = 0; // end of valid bytes
  private int bufferPosition = 0; // next byte to read
  /** Reads and returns a single byte, refilling the buffer when exhausted. */
  @Override
  public byte readByte() throws IOException {
    if (bufferPosition >= bufferLength)
      refill();
    return buffer[bufferPosition++];
  }
  public BufferedIndexInput() {}
  /** Inits BufferedIndexInput with a specific bufferSize */
  public BufferedIndexInput(int bufferSize) {
    checkBufferSize(bufferSize);
    this.bufferSize = bufferSize;
  }
  /** Change the buffer size used by this IndexInput */
  public void setBufferSize(int newSize) {
    assert buffer == null || bufferSize == buffer.length: "buffer=" + buffer + " bufferSize=" + bufferSize + " buffer.length=" + (buffer != null ? buffer.length : 0);
    if (newSize != bufferSize) {
      checkBufferSize(newSize);
      bufferSize = newSize;
      if (buffer != null) {
        // Resize the existing buffer and carefully save as
        // many bytes as possible starting from the current
        // bufferPosition
        byte[] newBuffer = new byte[newSize];
        final int leftInBuffer = bufferLength-bufferPosition;
        final int numToCopy;
        if (leftInBuffer > newSize)
          numToCopy = newSize;
        else
          numToCopy = leftInBuffer;
        System.arraycopy(buffer, bufferPosition, newBuffer, 0, numToCopy);
        // Rebase the window so the unread bytes start at offset 0.
        bufferStart += bufferPosition;
        bufferPosition = 0;
        bufferLength = numToCopy;
        newBuffer(newBuffer);
      }
    }
  }
  // Hook for subclasses that need to observe buffer (re)allocation.
  protected void newBuffer(byte[] newBuffer) {
    // Subclasses can do something here
    buffer = newBuffer;
  }
  /** Returns buffer size. @see #setBufferSize */
  public int getBufferSize() {
    return bufferSize;
  }
  // Rejects non-positive buffer sizes.
  private void checkBufferSize(int bufferSize) {
    if (bufferSize <= 0)
      throw new IllegalArgumentException("bufferSize must be greater than 0 (got " + bufferSize + ")");
  }
  // Convenience overload: always allows use of the internal buffer.
  @Override
  public void readBytes(byte[] b, int offset, int len) throws IOException {
    readBytes(b, offset, len, true);
  }
  @Override
  public void readBytes(byte[] b, int offset, int len, boolean useBuffer) throws IOException {
    if(len <= (bufferLength-bufferPosition)){
      // the buffer contains enough data to satisfy this request
      if(len>0) // to allow b to be null if len is 0...
        System.arraycopy(buffer, bufferPosition, b, offset, len);
      bufferPosition+=len;
    } else {
      // the buffer does not have enough data. First serve all we've got.
      int available = bufferLength - bufferPosition;
      if(available > 0){
        System.arraycopy(buffer, bufferPosition, b, offset, available);
        offset += available;
        len -= available;
        bufferPosition += available;
      }
      // and now, read the remaining 'len' bytes:
      if (useBuffer && len<bufferSize){
        // If the amount left to read is small enough, and
        // we are allowed to use our buffer, do it in the usual
        // buffered way: fill the buffer and copy from it:
        refill();
        if(bufferLength<len){
          // Throw an exception when refill() could not read len bytes:
          System.arraycopy(buffer, 0, b, offset, bufferLength);
          throw new IOException("read past EOF");
        } else {
          System.arraycopy(buffer, 0, b, offset, len);
          bufferPosition=len;
        }
      } else {
        // The amount left to read is larger than the buffer
        // or we've been asked to not use our buffer -
        // there's no performance reason not to read it all
        // at once. Note that unlike the previous code of
        // this function, there is no need to do a seek
        // here, because there's no need to reread what we
        // had in the buffer.
        long after = bufferStart+bufferPosition+len;
        if(after > length())
          throw new IOException("read past EOF");
        readInternal(b, offset, len);
        bufferStart = after;
        bufferPosition = 0;
        bufferLength = 0; // trigger refill() on read
      }
    }
  }
  // Fills the buffer from the underlying storage starting at the current
  // file pointer; throws "read past EOF" when no bytes remain.
  private void refill() throws IOException {
    long start = bufferStart + bufferPosition;
    long end = start + bufferSize;
    if (end > length()) // don't read past EOF
      end = length();
    int newLength = (int)(end - start);
    if (newLength <= 0)
      throw new IOException("read past EOF");
    if (buffer == null) {
      newBuffer(new byte[bufferSize]); // allocate buffer lazily
      seekInternal(bufferStart);
    }
    readInternal(buffer, 0, newLength);
    bufferLength = newLength;
    bufferStart = start;
    bufferPosition = 0;
  }
  /** Expert: implements buffer refill. Reads bytes from the current position
   * in the input.
   * @param b the array to read bytes into
   * @param offset the offset in the array to start storing bytes
   * @param length the number of bytes to read
   */
  protected abstract void readInternal(byte[] b, int offset, int length)
          throws IOException;
  @Override
  public long getFilePointer() { return bufferStart + bufferPosition; }
  @Override
  public void seek(long pos) throws IOException {
    if (pos >= bufferStart && pos < (bufferStart + bufferLength))
      bufferPosition = (int)(pos - bufferStart); // seek within buffer
    else {
      // Target is outside the buffered window: invalidate the buffer and
      // delegate to the concrete implementation.
      bufferStart = pos;
      bufferPosition = 0;
      bufferLength = 0; // trigger refill() on read()
      seekInternal(pos);
    }
  }
  /** Expert: implements seek. Sets current position in this file, where the
   * next {@link #readInternal(byte[],int,int)} will occur.
   * @see #readInternal(byte[],int,int)
   */
  protected abstract void seekInternal(long pos) throws IOException;
  @Override
  public Object clone() {
    // The clone starts with an empty buffer positioned at this input's
    // current file pointer; its own buffer is allocated on first read.
    BufferedIndexInput clone = (BufferedIndexInput)super.clone();
    clone.buffer = null;
    clone.bufferLength = 0;
    clone.bufferPosition = 0;
    clone.bufferStart = getFilePointer();
    return clone;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/BufferedIndexInput.java | Java | art | 7,162 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.store;
import java.io.IOException;
/**
* This exception is thrown when the <code>write.lock</code>
* could not be acquired. This
* happens when a writer tries to open an index
* that another writer already has open.
* @see Lock#obtain(long).
*/
public class LockObtainFailedException extends IOException {
  /**
   * @param message detail message describing the lock that could not be
   *        obtained
   */
  public LockObtainFailedException(String message) {
    super(message);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/LockObtainFailedException.java | Java | art | 1,224 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.store;
import java.io.IOException;
/**
* This exception is thrown when the <code>write.lock</code>
* could not be released.
* @see Lock#release().
*/
public class LockReleaseFailedException extends IOException {
  /**
   * @param message detail message describing the lock that could not be
   *        released
   */
  public LockReleaseFailedException(String message) {
    super(message);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/LockReleaseFailedException.java | Java | art | 1,128 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.File;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.BufferUnderflowException;
import java.nio.channels.FileChannel;
import java.nio.channels.FileChannel.MapMode;
import java.security.AccessController;
import java.security.PrivilegedExceptionAction;
import java.security.PrivilegedActionException;
import java.lang.reflect.Method;
import org.apache.lucene.util.Constants;
/** File-based {@link Directory} implementation that uses
* mmap for reading, and {@link
* SimpleFSDirectory.SimpleFSIndexOutput} for writing.
*
* <p><b>NOTE</b>: memory mapping uses up a portion of the
* virtual memory address space in your process equal to the
* size of the file being mapped. Before using this class,
* be sure your have plenty of virtual address space, e.g. by
* using a 64 bit JRE, or a 32 bit JRE with indexes that are
* guaranteed to fit within the address space.
* On 32 bit platforms also consult {@link #setMaxChunkSize}
* if you have problems with mmap failing because of fragmented
* address space. If you get an OutOfMemoryException, it is recommended
* to reduce the chunk size, until it works.
*
* <p>Due to <a href="http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4724038">
* this bug</a> in Sun's JRE, MMapDirectory's {@link IndexInput#close}
* is unable to close the underlying OS file handle. Only when GC
* finally collects the underlying objects, which could be quite
* some time later, will the file handle be closed.
*
* <p>This will consume additional transient disk usage: on Windows,
* attempts to delete or overwrite the files will result in an
* exception; on other platforms, which typically have a "delete on
* last close" semantics, while such operations will succeed, the bytes
* are still consuming space on disk. For many applications this
* limitation is not a problem (e.g. if you have plenty of disk space,
* and you don't rely on overwriting files on Windows) but it's still
* an important limitation to be aware of.
*
* <p>This class supplies the workaround mentioned in the bug report
* (disabled by default, see {@link #setUseUnmap}), which may fail on
* non-Sun JVMs. It forcefully unmaps the buffer on close by using
* an undocumented internal cleanup functionality.
* {@link #UNMAP_SUPPORTED} is <code>true</code>, if the workaround
* can be enabled (with no guarantees).
*/
public class MMapDirectory extends FSDirectory {

  /** Create a new MMapDirectory for the named location.
   *
   * @param path the path of the directory
   * @param lockFactory the lock factory to use, or null for the default
   * ({@link NativeFSLockFactory});
   * @throws IOException
   */
  public MMapDirectory(File path, LockFactory lockFactory) throws IOException {
    super(path, lockFactory);
  }

  /** Create a new MMapDirectory for the named location and {@link NativeFSLockFactory}.
   *
   * @param path the path of the directory
   * @throws IOException
   */
  public MMapDirectory(File path) throws IOException {
    super(path, null);
  }

  // When true, close() forcefully unmaps buffers through the Sun-internal
  // cleaner API (see setUseUnmap). Off by default: it is unsupported and can
  // crash the JVM (SIGSEGV) if another thread still touches the buffer.
  private boolean useUnmapHack = false;
  // Maximum size of one mapped chunk. 64 bit JVMs map whole files; 32 bit
  // JVMs use 256 MiB chunks because their address space may be too
  // fragmented for one large mapping (see setMaxChunkSize).
  private int maxBBuf = Constants.JRE_IS_64BIT ? Integer.MAX_VALUE : (256*1024*1024);

  /**
   * <code>true</code>, if this platform supports unmapping mmapped files.
   */
  public static final boolean UNMAP_SUPPORTED;
  static {
    boolean v;
    try {
      // Probe for the undocumented cleaner API; if either lookup fails the
      // unmap workaround cannot be enabled on this JVM.
      Class.forName("sun.misc.Cleaner");
      Class.forName("java.nio.DirectByteBuffer")
        .getMethod("cleaner");
      v = true;
    } catch (Exception e) {
      v = false;
    }
    UNMAP_SUPPORTED = v;
  }

  /**
   * This method enables the workaround for unmapping the buffers
   * from address space after closing {@link IndexInput}, that is
   * mentioned in the bug report. This hack may fail on non-Sun JVMs.
   * It forcefully unmaps the buffer on close by using
   * an undocumented internal cleanup functionality.
   * <p><b>NOTE:</b> Enabling this is completely unsupported
   * by Java and may lead to JVM crashes if <code>IndexInput</code>
   * is closed while another thread is still accessing it (SIGSEGV).
   * @throws IllegalArgumentException if {@link #UNMAP_SUPPORTED}
   * is <code>false</code> and the workaround cannot be enabled.
   */
  public void setUseUnmap(final boolean useUnmapHack) {
    if (useUnmapHack && !UNMAP_SUPPORTED)
      throw new IllegalArgumentException("Unmap hack not supported on this platform!");
    this.useUnmapHack=useUnmapHack;
  }

  /**
   * Returns <code>true</code>, if the unmap workaround is enabled.
   * @see #setUseUnmap
   */
  public boolean getUseUnmap() {
    return useUnmapHack;
  }

  /**
   * Try to unmap the buffer, this method silently fails if no support
   * for that in the JVM. On Windows, this leads to the fact,
   * that mmapped files cannot be modified or deleted.
   */
  final void cleanMapping(final ByteBuffer buffer) throws IOException {
    if (useUnmapHack) {
      try {
        // Reflectively call DirectByteBuffer.cleaner().clean() inside a
        // privileged block, since setAccessible requires permission when a
        // SecurityManager is installed.
        AccessController.doPrivileged(new PrivilegedExceptionAction<Object>() {
          public Object run() throws Exception {
            final Method getCleanerMethod = buffer.getClass()
              .getMethod("cleaner");
            getCleanerMethod.setAccessible(true);
            final Object cleaner = getCleanerMethod.invoke(buffer);
            if (cleaner != null) {
              cleaner.getClass().getMethod("clean")
                .invoke(cleaner);
            }
            return null;
          }
        });
      } catch (PrivilegedActionException e) {
        // Surface the real reflective failure as the cause of the IOException.
        final IOException ioe = new IOException("unable to unmap the mapped buffer");
        ioe.initCause(e.getCause());
        throw ioe;
      }
    }
  }

  /**
   * Sets the maximum chunk size (default is {@link Integer#MAX_VALUE} for
   * 64 bit JVMs and 256 MiBytes for 32 bit JVMs) used for memory mapping.
   * Especially on 32 bit platform, the address space can be very fragmented,
   * so large index files cannot be mapped.
   * Using a lower chunk size makes the directory implementation a little
   * bit slower (as the correct chunk must be resolved on each seek)
   * but the chance is higher that mmap does not fail. On 64 bit
   * Java platforms, this parameter should always be {@link Integer#MAX_VALUE},
   * as the address space is big enough.
   */
  public void setMaxChunkSize(final int maxBBuf) {
    if (maxBBuf<=0)
      throw new IllegalArgumentException("Maximum chunk size for mmap must be >0");
    this.maxBBuf=maxBBuf;
  }

  /**
   * Returns the current mmap chunk size.
   * @see #setMaxChunkSize
   */
  public int getMaxChunkSize() {
    return maxBBuf;
  }

  /** IndexInput over a file small enough to fit into one mapped buffer. */
  private class MMapIndexInput extends IndexInput {

    private ByteBuffer buffer;
    private final long length;
    // Clones share the parent's mapping and must not unmap it on close.
    private boolean isClone = false;

    private MMapIndexInput(RandomAccessFile raf) throws IOException {
      this.length = raf.length();
      this.buffer = raf.getChannel().map(MapMode.READ_ONLY, 0, length);
    }

    @Override
    public byte readByte() throws IOException {
      try {
        return buffer.get();
      } catch (BufferUnderflowException e) {
        throw new IOException("read past EOF");
      }
    }

    @Override
    public void readBytes(byte[] b, int offset, int len) throws IOException {
      try {
        buffer.get(b, offset, len);
      } catch (BufferUnderflowException e) {
        throw new IOException("read past EOF");
      }
    }

    @Override
    public long getFilePointer() {
      return buffer.position();
    }

    @Override
    public void seek(long pos) throws IOException {
      // Safe narrowing cast: this class is only used when length <= maxBBuf,
      // which never exceeds Integer.MAX_VALUE.
      buffer.position((int)pos);
    }

    @Override
    public long length() {
      return length;
    }

    @Override
    public Object clone() {
      MMapIndexInput clone = (MMapIndexInput)super.clone();
      clone.isClone = true;
      // duplicate() shares the mapped memory but has an independent position.
      clone.buffer = buffer.duplicate();
      return clone;
    }

    @Override
    public void close() throws IOException {
      if (isClone || buffer == null) return;
      // unmap the buffer (if enabled) and at least unset it for GC
      try {
        cleanMapping(buffer);
      } finally {
        buffer = null;
      }
    }
  }

  // Because Java's ByteBuffer uses an int to address the
  // values, it's necessary to access a file >
  // Integer.MAX_VALUE in size using multiple byte buffers.
  private class MultiMMapIndexInput extends IndexInput {

    private ByteBuffer[] buffers;
    private int[] bufSizes; // keep here, ByteBuffer.size() method is optional
    private final long length;
    private int curBufIndex;
    private final int maxBufSize;
    private ByteBuffer curBuf; // redundant for speed: buffers[curBufIndex]
    private int curAvail; // redundant for speed: (bufSizes[curBufIndex] - curBuf.position())
    // Clones share the parent's mappings and must not unmap them on close.
    private boolean isClone = false;

    public MultiMMapIndexInput(RandomAccessFile raf, int maxBufSize)
      throws IOException {
      this.length = raf.length();
      this.maxBufSize = maxBufSize;
      if (maxBufSize <= 0)
        throw new IllegalArgumentException("Non positive maxBufSize: "
                                           + maxBufSize);
      if ((length / maxBufSize) > Integer.MAX_VALUE)
        throw new IllegalArgumentException
          ("RandomAccessFile too big for maximum buffer size: "
           + raf.toString());
      int nrBuffers = (int) (length / maxBufSize);
      // Round up when the file size is not an exact multiple of maxBufSize.
      if (((long) nrBuffers * maxBufSize) < length) nrBuffers++;
      this.buffers = new ByteBuffer[nrBuffers];
      this.bufSizes = new int[nrBuffers];
      long bufferStart = 0;
      FileChannel rafc = raf.getChannel();
      for (int bufNr = 0; bufNr < nrBuffers; bufNr++) {
        int bufSize = (length > (bufferStart + maxBufSize))
          ? maxBufSize
          : (int) (length - bufferStart);
        this.buffers[bufNr] = rafc.map(MapMode.READ_ONLY,bufferStart,bufSize);
        this.bufSizes[bufNr] = bufSize;
        bufferStart += bufSize;
      }
      seek(0L);
    }

    @Override
    public byte readByte() throws IOException {
      // Performance might be improved by reading ahead into an array of
      // e.g. 128 bytes and readByte() from there.
      if (curAvail == 0) {
        curBufIndex++;
        if (curBufIndex >= buffers.length)
          throw new IOException("read past EOF");
        curBuf = buffers[curBufIndex];
        curBuf.position(0);
        curAvail = bufSizes[curBufIndex];
      }
      curAvail--;
      return curBuf.get();
    }

    @Override
    public void readBytes(byte[] b, int offset, int len) throws IOException {
      // Drain the current buffer and advance across buffer boundaries until
      // the remaining request fits inside one buffer.
      while (len > curAvail) {
        curBuf.get(b, offset, curAvail);
        len -= curAvail;
        offset += curAvail;
        curBufIndex++;
        if (curBufIndex >= buffers.length)
          throw new IOException("read past EOF");
        curBuf = buffers[curBufIndex];
        curBuf.position(0);
        curAvail = bufSizes[curBufIndex];
      }
      curBuf.get(b, offset, len);
      curAvail -= len;
    }

    @Override
    public long getFilePointer() {
      return ((long) curBufIndex * maxBufSize) + curBuf.position();
    }

    @Override
    public void seek(long pos) throws IOException {
      curBufIndex = (int) (pos / maxBufSize);
      curBuf = buffers[curBufIndex];
      int bufOffset = (int) (pos - ((long) curBufIndex * maxBufSize));
      curBuf.position(bufOffset);
      curAvail = bufSizes[curBufIndex] - bufOffset;
    }

    @Override
    public long length() {
      return length;
    }

    @Override
    public Object clone() {
      MultiMMapIndexInput clone = (MultiMMapIndexInput)super.clone();
      clone.isClone = true;
      clone.buffers = new ByteBuffer[buffers.length];
      // No need to clone bufSizes.
      // Since most clones will use only one buffer, duplicate() could also be
      // done lazy in clones, e.g. when adapting curBuf.
      for (int bufNr = 0; bufNr < buffers.length; bufNr++) {
        clone.buffers[bufNr] = buffers[bufNr].duplicate();
      }
      try {
        clone.seek(getFilePointer());
      } catch(IOException ioe) {
        // Bug fix: new RuntimeException(ioe) already records the cause.
        // The previous code additionally called initCause(ioe), which
        // throws IllegalStateException ("Can't overwrite cause") and
        // masked the original I/O error.
        throw new RuntimeException(ioe);
      }
      return clone;
    }

    @Override
    public void close() throws IOException {
      if (isClone || buffers == null) return;
      try {
        for (int bufNr = 0; bufNr < buffers.length; bufNr++) {
          // unmap the buffer (if enabled) and at least unset it for GC
          try {
            cleanMapping(buffers[bufNr]);
          } finally {
            buffers[bufNr] = null;
          }
        }
      } finally {
        buffers = null;
      }
    }
  }

  /** Creates an IndexInput for the file with the given name. */
  @Override
  public IndexInput openInput(String name, int bufferSize) throws IOException {
    ensureOpen();
    File f = new File(getFile(), name);
    RandomAccessFile raf = new RandomAccessFile(f, "r");
    try {
      // Single mapping when the file fits into one chunk, otherwise a
      // multi-buffer view; the file handle can be closed once mapped.
      return (raf.length() <= (long) maxBBuf)
             ? (IndexInput) new MMapIndexInput(raf)
             : (IndexInput) new MultiMMapIndexInput(raf, maxBBuf);
    } finally {
      raf.close();
    }
  }

  /** Creates an IndexOutput for the file with the given name. */
  @Override
  public IndexOutput createOutput(String name) throws IOException {
    initOutput(name);
    return new SimpleFSDirectory.SimpleFSIndexOutput(new File(directory, name));
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/MMapDirectory.java | Java | art | 14,349 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
/**
* <p>Base class for Locking implementation. {@link Directory} uses
* instances of this class to implement locking.</p>
*
* <p>Note that there are some useful tools to verify that
* your LockFactory is working correctly: {@link
* VerifyingLockFactory}, {@link LockStressTest}, {@link
* LockVerifyServer}.</p>
*
* @see LockVerifyServer
* @see LockStressTest
* @see VerifyingLockFactory
*/
public abstract class LockFactory {

  /** Prefix applied to the names of all locks created by this factory;
   * <code>null</code> until a Directory (or a caller) assigns one. */
  protected String lockPrefix = null;

  /**
   * Sets the prefix used for every lock created by this LockFactory.
   * A Directory normally calls this once when it receives the factory,
   * but it may also be invoked afterwards to override the prefix — useful
   * when several machines mount the same shared directory under
   * different paths.
   */
  public void setLockPrefix(String lockPrefix) {
    this.lockPrefix = lockPrefix;
  }

  /**
   * Returns the prefix currently applied to locks created by this
   * LockFactory.
   */
  public String getLockPrefix() {
    return lockPrefix;
  }

  /**
   * Produces a new Lock instance identified by lockName.
   * @param lockName name of the lock to be created.
   */
  public abstract Lock makeLock(String lockName);

  /**
   * Forcefully unlocks and removes the named lock. Call this only when
   * you are certain the lock is no longer held by anyone.
   * @param lockName name of the lock to be cleared.
   */
  public abstract void clearLock(String lockName) throws IOException;
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/LockFactory.java | Java | art | 2,483 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.Closeable;
import org.apache.lucene.index.IndexFileNameFilter;
/** A Directory is a flat list of files. Files may be written once, when they
* are created. Once a file is created it may only be opened for read, or
* deleted. Random access is permitted both when reading and writing.
*
* <p> Java's i/o APIs not used directly, but rather all i/o is
* through this API. This permits things such as: <ul>
* <li> implementation of RAM-based indices;
* <li> implementation indices stored in a database, via JDBC;
* <li> implementation of an index as a single file;
* </ul>
*
* Directory locking is implemented by an instance of {@link
* LockFactory}, and can be changed for each Directory
* instance using {@link #setLockFactory}.
*
*/
public abstract class Directory implements Closeable {
  // Set to false by implementations when close() runs; volatile so
  // ensureOpen() observes the transition from any thread.
  volatile protected boolean isOpen = true;
  /** Holds the LockFactory instance (implements locking for
   * this Directory instance). */
  protected LockFactory lockFactory;
  /** Returns an array of strings, one for each file in the
   * directory.
   * @throws IOException
   */
  public abstract String[] listAll() throws IOException;
  /** Returns true iff a file with the given name exists. */
  public abstract boolean fileExists(String name)
       throws IOException;
  /** Returns the time the named file was last modified. */
  public abstract long fileModified(String name)
       throws IOException;
  /** Set the modified time of an existing file to now. */
  public abstract void touchFile(String name)
       throws IOException;
  /** Removes an existing file in the directory. */
  public abstract void deleteFile(String name)
       throws IOException;
  /** Returns the length of a file in the directory. */
  public abstract long fileLength(String name)
       throws IOException;
  /** Creates a new, empty file in the directory with the given name.
      Returns a stream writing this file. */
  public abstract IndexOutput createOutput(String name)
       throws IOException;
  /** Ensure that any writes to this file are moved to
   * stable storage.  Lucene uses this to properly commit
   * changes to the index, to prevent a machine/OS crash
   * from corrupting the index. */
  public void sync(String name) throws IOException {}
  /** Returns a stream reading an existing file. */
  public abstract IndexInput openInput(String name)
    throws IOException;
  /** Returns a stream reading an existing file, with the
   * specified read buffer size.  The particular Directory
   * implementation may ignore the buffer size.  Currently
   * the only Directory implementations that respect this
   * parameter are {@link FSDirectory} and {@link
   * org.apache.lucene.index.CompoundFileReader}.
   */
  public IndexInput openInput(String name, int bufferSize) throws IOException {
    // Default implementation ignores bufferSize; subclasses may honor it.
    return openInput(name);
  }
  /** Construct a {@link Lock}.
   * @param name the name of the lock file
   */
  public Lock makeLock(String name) {
    // NOTE(review): unlike clearLock, this does not guard against a null
    // lockFactory — presumably implementations that provide their own
    // locking override this method; confirm before relying on it.
    return lockFactory.makeLock(name);
  }
  /**
   * Attempt to clear (forcefully unlock and remove) the
   * specified lock.  Only call this at a time when you are
   * certain this lock is no longer in use.
   * @param name name of the lock to be cleared.
   */
  public void clearLock(String name) throws IOException {
    if (lockFactory != null) {
      lockFactory.clearLock(name);
    }
  }
  /** Closes the store. */
  public abstract void close()
       throws IOException;
  /**
   * Set the LockFactory that this Directory instance should
   * use for its locking implementation.  Each * instance of
   * LockFactory should only be used for one directory (ie,
   * do not share a single instance across multiple
   * Directories).
   *
   * @param lockFactory instance of {@link LockFactory}.
   */
  public void setLockFactory(LockFactory lockFactory) {
    assert lockFactory != null;
    this.lockFactory = lockFactory;
    // Scope the factory's locks to this directory so two Directory
    // instances over the same index share lock names.
    lockFactory.setLockPrefix(this.getLockID());
  }
  /**
   * Get the LockFactory that this Directory instance is
   * using for its locking implementation.  Note that this
   * may be null for Directory implementations that provide
   * their own locking implementation.
   */
  public LockFactory getLockFactory() {
    return this.lockFactory;
  }
  /**
   * Return a string identifier that uniquely differentiates
   * this Directory instance from other Directory instances.
   * This ID should be the same if two Directory instances
   * (even in different JVMs and/or on different machines)
   * are considered "the same index".  This is how locking
   * "scopes" to the right index.
   */
  public String getLockID() {
      // Default: identity-based toString(); subclasses with stable paths
      // are expected to override for cross-JVM lock scoping.
      return this.toString();
  }
  /**
   * Copy contents of a directory src to a directory dest.
   * If a file in src already exists in dest then the
   * one in dest will be blindly overwritten.
   *
   * <p><b>NOTE:</b> the source directory cannot change
   * while this method is running.  Otherwise the results
   * are undefined and you could easily hit a
   * FileNotFoundException.
   *
   * <p><b>NOTE:</b> this method only copies files that look
   * like index files (ie, have extensions matching the
   * known extensions of index files).
   *
   * @param src source directory
   * @param dest destination directory
   * @param closeDirSrc if <code>true</code>, call {@link #close()} method on source directory
   * @throws IOException
   */
  public static void copy(Directory src, Directory dest, boolean closeDirSrc) throws IOException {
    final String[] files = src.listAll();
    // Only files recognized as Lucene index files are copied.
    IndexFileNameFilter filter = IndexFileNameFilter.getFilter();
    byte[] buf = new byte[BufferedIndexOutput.BUFFER_SIZE];
    for (int i = 0; i < files.length; i++) {
      if (!filter.accept(null, files[i]))
        continue;
      IndexOutput os = null;
      IndexInput is = null;
      try {
        // create file in dest directory
        os = dest.createOutput(files[i]);
        // read current file
        is = src.openInput(files[i]);
        // and copy to dest directory
        long len = is.length();
        long readCount = 0;
        // Copy in BUFFER_SIZE chunks; the final chunk may be shorter.
        while (readCount < len) {
          int toRead = readCount + BufferedIndexOutput.BUFFER_SIZE > len ? (int)(len - readCount) : BufferedIndexOutput.BUFFER_SIZE;
          is.readBytes(buf, 0, toRead);
          os.writeBytes(buf, toRead);
          readCount += toRead;
        }
      } finally {
        // graceful cleanup
        try {
          if (os != null)
            os.close();
        } finally {
          if (is != null)
            is.close();
        }
      }
    }
    if(closeDirSrc)
      src.close();
  }
  /**
   * @throws AlreadyClosedException if this Directory is closed
   */
  protected final void ensureOpen() throws AlreadyClosedException {
    if (!isOpen)
      throw new AlreadyClosedException("this Directory is closed");
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/Directory.java | Java | art | 7,753 |