proj_name
stringclasses 131
values | relative_path
stringlengths 30
228
| class_name
stringlengths 1
68
| func_name
stringlengths 1
48
| masked_class
stringlengths 78
9.82k
| func_body
stringlengths 46
9.61k
| len_input
int64 29
2.01k
| len_output
int64 14
1.94k
| total
int64 55
2.05k
| relevant_context
stringlengths 0
38.4k
|
|---|---|---|---|---|---|---|---|---|---|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/WordRepeatBeginningRule.java
|
WordRepeatBeginningRule
|
match
|
class WordRepeatBeginningRule extends TextLevelRule {
public WordRepeatBeginningRule(ResourceBundle messages, Language language) {
super(messages);
super.setCategory(Categories.REPETITIONS_STYLE.getCategory(messages));
setLocQualityIssueType(ITSIssueType.Style);
}
@Override
public String getId() {
return "WORD_REPEAT_BEGINNING_RULE";
}
@Override
public String getDescription() {
return messages.getString("desc_repetition_beginning");
}
protected boolean isAdverb(AnalyzedTokenReadings token) {
return false;
}
public boolean isException(String token) {
// avoid warning when having lists like "2007: ..." or the like
return token.equals(":") || token.equals("–") || token.equals("-") || token.equals("✔️") || token.equals("➡️")
|| token.equals("—") || token.equals("⭐️") || token.equals("⚠️");
}
public boolean isSentenceException(AnalyzedSentence sentence) {
return false;
}
@Override
public RuleMatch[] match(List<AnalyzedSentence> sentences) throws IOException {<FILL_FUNCTION_BODY>}
protected List<String> getSuggestions(AnalyzedTokenReadings analyzedToken) {
return Collections.emptyList();
}
@Override
public int minToCheckParagraph() {
return 2;
}
}
|
String lastToken = "";
String beforeLastToken = "";
List<RuleMatch> ruleMatches = new ArrayList<>();
int pos = 0;
AnalyzedSentence prevSentence = null;
for (AnalyzedSentence sentence : sentences) {
if (isSentenceException(sentence)) {
prevSentence = null;
continue;
}
AnalyzedTokenReadings[] tokens = sentence.getTokensWithoutWhitespace();
String token = "";
if (tokens.length > 1) {
AnalyzedTokenReadings analyzedToken = tokens[1];
token = analyzedToken.getToken();
if (tokens.length > 3) {
// avoid "..." etc. to be matched:
boolean isWord = true;
if (token.length() == 1) {
if (!Character.isLetter(token.charAt(0))) {
isWord = false;
}
}
if (isWord && lastToken.equals(token) && !isException(token) && !isException(tokens[2].getToken())
&& !isException(tokens[3].getToken()) && prevSentence != null
&& prevSentence.getText().trim().matches(".+[.?!]$")) { // no matches for e.g. table cells
String shortMsg;
if (isAdverb(analyzedToken)) {
shortMsg = messages.getString("desc_repetition_beginning_adv");
} else if (beforeLastToken.equals(token)) {
shortMsg = messages.getString("desc_repetition_beginning_word");
} else {
shortMsg = "";
}
if (!shortMsg.isEmpty()) {
String msg = shortMsg + " " + messages.getString("desc_repetition_beginning_thesaurus");
int startPos = analyzedToken.getStartPos();
int endPos = startPos + token.length();
RuleMatch ruleMatch = new RuleMatch(this, sentence, pos + startPos, pos + endPos, msg, shortMsg);
List<String> suggestions = getSuggestions(analyzedToken);
if (suggestions.size() > 0) {
ruleMatch.setSuggestedReplacements(suggestions);
}
ruleMatches.add(ruleMatch);
}
}
}
}
beforeLastToken = lastToken;
lastToken = token;
pos += sentence.getCorrectedTextLength();
prevSentence = sentence;
}
return toRuleMatchArray(ruleMatches);
| 416
| 625
| 1,041
|
<methods>public void <init>() ,public void <init>(java.util.ResourceBundle) ,public int estimateContextForSureMatch() ,public org.languagetool.rules.RuleMatch[] match(List<org.languagetool.AnalyzedSentence>, org.languagetool.markup.AnnotatedText) throws java.io.IOException,public abstract org.languagetool.rules.RuleMatch[] match(List<org.languagetool.AnalyzedSentence>) throws java.io.IOException,public final org.languagetool.rules.RuleMatch[] match(org.languagetool.AnalyzedSentence) throws java.io.IOException,public abstract int minToCheckParagraph() <variables>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/WordRepeatRule.java
|
WordRepeatRule
|
match
|
class WordRepeatRule extends Rule {
public WordRepeatRule(ResourceBundle messages, Language language) {
super(messages);
super.setCategory(Categories.MISC.getCategory(messages));
setLocQualityIssueType(ITSIssueType.Duplication);
}
/**
* Implement this method to return <code>true</code> if there's
* a potential word repetition at the current position that should be ignored,
* i.e. if no error should be created.
* @param tokens the tokens of the sentence currently being checked
* @param position the current position in the tokens
* @return this implementation always returns false
*/
public boolean ignore(AnalyzedTokenReadings[] tokens, int position) {
if (wordRepetitionOf("Phi", tokens, position)) {
return true; // "Phi Phi Islands"
} else if (wordRepetitionOf("Li", tokens, position)) {
return true; // "Li Li", Chinese name
} else if (wordRepetitionOf("Xiao", tokens, position)) {
return true; // "Xiao Xiao", name
} else if (wordRepetitionOf("Duran", tokens, position)) {
return true; // "Duran Duran"
} else if (wordRepetitionOf("Wagga", tokens, position)) {
return true; // "Wagga Wagga"
} else if (wordRepetitionOf("Abdullah", tokens, position)) {
return true; // https://en.wikipedia.org/wiki/Abdullah_Abdullah
} else if (wordRepetitionOf("Nwe", tokens, position)) {
return true; // e.g. https://en.wikipedia.org/wiki/Nwe_Nwe_Aung
} else if (wordRepetitionOf("Pago", tokens, position)) {
return true; // "Pago Pago"
} else if (wordRepetitionOf("Cao", tokens, position)) {
return true; // https://en.wikipedia.org/wiki/Cao_Cao
}
return false;
}
@Override
public String getId() {
return "WORD_REPEAT_RULE";
}
@Override
public String getDescription() {
return messages.getString("desc_repetition");
}
@Override
public int estimateContextForSureMatch() {
return 1;
}
@Override
public RuleMatch[] match(AnalyzedSentence sentence) {<FILL_FUNCTION_BODY>}
protected RuleMatch createRuleMatch(String prevToken, String token, int prevPos, int pos, String msg, AnalyzedSentence sentence) {
RuleMatch ruleMatch = new RuleMatch(this, sentence, prevPos, pos+prevToken.length(), msg, messages.getString("desc_repetition_short"));
ruleMatch.setSuggestedReplacement(prevToken);
return ruleMatch;
}
protected boolean wordRepetitionOf(String word, AnalyzedTokenReadings[] tokens, int position) {
return position > 0 && tokens[position - 1].getToken().equals(word) && tokens[position].getToken().equals(word);
}
// avoid "..." etc. to be matched:
private boolean isWord(String token) {
if (StringTools.isEmoji(token)) {
return false;
}
if (StringUtils.isNumericSpace(token)) {
return false;
} else if (token.length() == 1) {
char c = token.charAt(0);
if (!Character.isLetter(c)) {
return false;
}
}
return true;
}
}
|
List<RuleMatch> ruleMatches = new ArrayList<>();
AnalyzedTokenReadings[] tokens = getSentenceWithImmunization(sentence).getTokensWithoutWhitespace();
String prevToken = "";
// we start from token 1, token no. 0 is guaranteed to be SENT_START
for (int i = 1; i < tokens.length; i++) {
String token = tokens[i].getToken();
if (tokens[i].isImmunized()) {
prevToken = "";
continue;
}
if (isWord(token) && prevToken.equalsIgnoreCase(token) && !ignore(tokens, i)) {
String msg = messages.getString("repetition");
int prevPos = tokens[i - 1].getStartPos();
int pos = tokens[i].getStartPos();
RuleMatch ruleMatch = createRuleMatch(prevToken, token, prevPos, pos, msg, sentence);
ruleMatches.add(ruleMatch);
}
prevToken = token;
}
return toRuleMatchArray(ruleMatches);
| 946
| 271
| 1,217
|
<methods>public void <init>() ,public void <init>(java.util.ResourceBundle) ,public void addTags(List<java.lang.String>) ,public void addToneTags(List<java.lang.String>) ,public int estimateContextForSureMatch() ,public List<org.languagetool.tagging.disambiguation.rules.DisambiguationPatternRule> getAntiPatterns() ,public org.languagetool.rules.Category getCategory() ,public java.lang.String getConfigureText() ,public final List<org.languagetool.rules.CorrectExample> getCorrectExamples() ,public int getDefaultValue() ,public abstract java.lang.String getDescription() ,public int getDistanceTokens() ,public final List<org.languagetool.rules.ErrorTriggeringExample> getErrorTriggeringExamples() ,public java.lang.String getFullId() ,public abstract java.lang.String getId() ,public final List<org.languagetool.rules.IncorrectExample> getIncorrectExamples() ,public org.languagetool.rules.ITSIssueType getLocQualityIssueType() ,public int getMaxConfigurableValue() ,public int getMinConfigurableValue() ,public int getMinPrevMatches() ,public int getPriority() ,public java.lang.String getSourceFile() ,public java.lang.String getSubId() ,public List<org.languagetool.Tag> getTags() ,public List<org.languagetool.ToneTag> getToneTags() ,public java.net.URL getUrl() ,public boolean hasConfigurableValue() ,public boolean hasTag(org.languagetool.Tag) ,public boolean hasToneTag(org.languagetool.ToneTag) ,public final boolean isDefaultOff() ,public final boolean isDefaultTempOff() ,public boolean isDictionaryBasedSpellingRule() ,public boolean isGoalSpecific() ,public final boolean isOfficeDefaultOff() ,public final boolean isOfficeDefaultOn() ,public boolean isPremium() ,public abstract org.languagetool.rules.RuleMatch[] match(org.languagetool.AnalyzedSentence) throws java.io.IOException,public final void setCategory(org.languagetool.rules.Category) ,public final void setCorrectExamples(List<org.languagetool.rules.CorrectExample>) ,public final void setDefaultOff() ,public final void 
setDefaultOn() ,public final void setDefaultTempOff() ,public void setDistanceTokens(int) ,public final void setErrorTriggeringExamples(List<org.languagetool.rules.ErrorTriggeringExample>) ,public void setGoalSpecific(boolean) ,public final void setIncorrectExamples(List<org.languagetool.rules.IncorrectExample>) ,public void setLocQualityIssueType(org.languagetool.rules.ITSIssueType) ,public void setMinPrevMatches(int) ,public final void setOfficeDefaultOff() ,public final void setOfficeDefaultOn() ,public void setPremium(boolean) ,public void setPriority(int) ,public void setTags(List<org.languagetool.Tag>) ,public void setToneTags(List<org.languagetool.ToneTag>) ,public void setUrl(java.net.URL) ,public boolean supportsLanguage(org.languagetool.Language) ,public boolean useInOffice() <variables>private static final org.languagetool.rules.Category MISC,private org.languagetool.rules.Category category,private List<org.languagetool.rules.CorrectExample> correctExamples,private boolean defaultOff,private boolean defaultTempOff,private int distanceTokens,private List<org.languagetool.rules.ErrorTriggeringExample> errorTriggeringExamples,private List<org.languagetool.rules.IncorrectExample> incorrectExamples,private boolean isGoalSpecific,private boolean isPremium,private org.languagetool.rules.ITSIssueType locQualityIssueType,protected final non-sealed java.util.ResourceBundle messages,private int minPrevMatches,private boolean officeDefaultOff,private boolean officeDefaultOn,private int priority,private List<org.languagetool.Tag> tags,private List<org.languagetool.ToneTag> toneTags,private java.net.URL url
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/YMDDateHelper.java
|
YMDDateHelper
|
parseDate
|
class YMDDateHelper {
public YMDDateHelper() {
}
public Map<String, String> parseDate(Map<String, String> args) {<FILL_FUNCTION_BODY>}
public RuleMatch correctDate(RuleMatch match, Map<String, String> args) {
String year = args.get("year");
String month = args.get("month");
String day = args.get("day");
int correctYear = Integer.parseInt(year) + 1;
String correctDate = String.format("%d-%s-%s", correctYear, month, day);
String message = match.getMessage()
.replace("{realDate}", correctDate);
RuleMatch ruleMatch = new RuleMatch(match.getRule(), match.getSentence(), match.getFromPos(),
match.getToPos(), message, match.getShortMessage());
ruleMatch.setType(match.getType());
return ruleMatch;
}
}
|
String dateString = args.get("date");
if (dateString == null) {
throw new IllegalArgumentException("Missing key 'date'");
}
String[] parts = dateString.split("-");
if (parts.length != 3) {
throw new RuntimeException("Expected date in format 'yyyy-mm-dd': '" + dateString + "'");
}
args.put("year", parts[0]);
args.put("month", parts[1]);
args.put("day", parts[2]);
return args;
| 240
| 136
| 376
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/bitext/DifferentLengthRule.java
|
DifferentLengthRule
|
match
|
class DifferentLengthRule extends BitextRule {
private static final int MAX_SKEW = 250;
private static final int MIN_SKEW = 30;
public DifferentLengthRule() {
setLocQualityIssueType(ITSIssueType.Length);
}
@Override
public String getDescription() {
return "Check if translation length is similar to source length";
}
@Override
public String getId() {
return "TRANSLATION_LENGTH";
}
@Override
public String getMessage() {
return "Source and target translation lengths are very different";
}
@Override
public RuleMatch[] match(AnalyzedSentence sourceText,
AnalyzedSentence targetText) throws IOException {<FILL_FUNCTION_BODY>}
private boolean isLengthDifferent(String src, String trg) {
double skew = ((double) src.length() / (double) trg.length()) * 100.00;
return skew > MAX_SKEW || skew < MIN_SKEW;
}
}
|
if (isLengthDifferent(sourceText.getText(), targetText.getText())) {
AnalyzedTokenReadings[] tokens = targetText.getTokens();
int endPos = tokens[tokens.length - 1].getStartPos() + tokens[tokens.length - 1].getToken().length();
return new RuleMatch[] { new RuleMatch(this, targetText, 0, endPos, getMessage()) };
}
return new RuleMatch[0];
| 284
| 117
| 401
|
<methods>public non-sealed void <init>() ,public final List<org.languagetool.bitext.StringPair> getCorrectBitextExamples() ,public final List<org.languagetool.rules.bitext.IncorrectBitextExample> getIncorrectBitextExamples() ,public abstract java.lang.String getMessage() ,public static List<Class<? extends org.languagetool.rules.bitext.BitextRule>> getRelevantRules() ,public final org.languagetool.Language getSourceLanguage() ,public abstract org.languagetool.rules.RuleMatch[] match(org.languagetool.AnalyzedSentence, org.languagetool.AnalyzedSentence) throws java.io.IOException,public org.languagetool.rules.RuleMatch[] match(org.languagetool.AnalyzedSentence) throws java.io.IOException,public final void setCorrectBitextExamples(List<org.languagetool.bitext.StringPair>) ,public final void setIncorrectBitextExamples(List<org.languagetool.rules.bitext.IncorrectBitextExample>) ,public final void setSourceLanguage(org.languagetool.Language) <variables>private List<org.languagetool.bitext.StringPair> correctExamples,private List<org.languagetool.rules.bitext.IncorrectBitextExample> incorrectExamples,private org.languagetool.Language sourceLanguage
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/bitext/IncorrectBitextExample.java
|
IncorrectBitextExample
|
toString
|
class IncorrectBitextExample {
private final StringPair example;
private final List<String> corrections;
public IncorrectBitextExample(StringPair example) {
this(example, Collections.emptyList());
}
/**
* @since 2.9
*/
public IncorrectBitextExample(StringPair example, List<String> corrections) {
this.example = Objects.requireNonNull(example);
this.corrections = Collections.unmodifiableList(corrections);
}
/**
* Return the example that contains the error.
*/
public StringPair getExample() {
return example;
}
/**
* Return the possible corrections.
*/
public List<String> getCorrections() {
return corrections;
}
@Override
public String toString() {<FILL_FUNCTION_BODY>}
}
|
return example.getSource() + "/ " + example.getTarget() + " " + corrections;
| 234
| 26
| 260
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/ngrams/GoogleToken.java
|
GoogleToken
|
getGoogleTokens
|
class GoogleToken {
final String token;
final int startPos;
final int endPos;
final Set<AnalyzedToken> posTags;
GoogleToken(String token, int startPos, int endPos) {
this(token, startPos, endPos, Collections.emptySet());
}
GoogleToken(String token, int startPos, int endPos, Set<AnalyzedToken> posTags) {
this.token = "’".equals(token) ? "'" : token; // Google seems to have indexed the apostrophe always like this
this.startPos = startPos;
this.endPos = endPos;
this.posTags = posTags;
}
Set<AnalyzedToken> getPosTags() {
return posTags;
}
boolean isWhitespace() {
return StringTools.isWhitespace(token);
}
@Override
public String toString() {
return token;
}
// Tokenization in google ngram corpus is different from LT tokenization (e.g. {@code you ' re} -> {@code you 're}),
// so we use getTokenizer() and simple ignore the LT tokens.
static List<GoogleToken> getGoogleTokens(String sentence, boolean addStartToken, Tokenizer wordTokenizer) {
List<GoogleToken> result = new ArrayList<>();
if (addStartToken) {
result.add(new GoogleToken(LanguageModel.GOOGLE_SENTENCE_START, 0, 0));
}
List<String> tokens = wordTokenizer.tokenize(sentence);
int startPos = 0;
for (String token : tokens) {
if (!StringTools.isWhitespace(token)) {
result.add(new GoogleToken(token, startPos, startPos+token.length()));
}
startPos += token.length();
}
return result;
}
// Tokenization in google ngram corpus is different from LT tokenization (e.g. {@code you ' re} -> {@code you 're}),
// so we use getTokenizer() and simple ignore the LT tokens. Also adds POS tags from original sentence if trivially possible.
static List<GoogleToken> getGoogleTokens(AnalyzedSentence sentence, boolean addStartToken, Tokenizer wordTokenizer) {<FILL_FUNCTION_BODY>}
private static Set<AnalyzedToken> findOriginalAnalyzedTokens(AnalyzedSentence sentence, int startPos, int endPos) {
Set<AnalyzedToken> result = new HashSet<>();
for (AnalyzedTokenReadings tokens : sentence.getTokensWithoutWhitespace()) {
if (tokens.getStartPos() == startPos && tokens.getEndPos() == endPos) {
for (AnalyzedToken analyzedToken : tokens.getReadings()) {
result.add(analyzedToken);
}
}
}
return result;
}
}
|
List<GoogleToken> result = new ArrayList<>();
if (addStartToken) {
result.add(new GoogleToken(LanguageModel.GOOGLE_SENTENCE_START, 0, 0));
}
List<String> tokens = wordTokenizer.tokenize(sentence.getText());
int startPos = 0;
for (String token : tokens) {
if (!StringTools.isWhitespace(token)) {
int endPos = startPos + token.length();
Set<AnalyzedToken> pos = findOriginalAnalyzedTokens(sentence, startPos, endPos);
GoogleToken gToken = new GoogleToken(token, startPos, endPos, pos);
result.add(gToken);
}
startPos += token.length();
}
return result;
| 736
| 200
| 936
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/ngrams/GoogleTokenUtil.java
|
GoogleTokenUtil
|
getGoogleTokensForString
|
class GoogleTokenUtil {
public static List<String> getGoogleTokensForString(String sentence, boolean addStartToken, Language language) {<FILL_FUNCTION_BODY>}
}
|
List<String> tokens = new LinkedList<>();
for (GoogleToken token : GoogleToken.getGoogleTokens(sentence, addStartToken, language.getWordTokenizer())) {
tokens.add(token.token);
}
return tokens;
| 48
| 65
| 113
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/AbstractTokenBasedRule.java
|
TokenHint
|
equals
|
class TokenHint {
final boolean inflected;
final String[] lowerCaseValues;
final int tokenIndex;
private TokenHint(boolean inflected, Set<String> possibleValues, int tokenIndex) {
this.inflected = inflected;
this.tokenIndex = tokenIndex;
lowerCaseValues = possibleValues.stream().map(String::toLowerCase).distinct().toArray(String[]::new);
}
@Override
public boolean equals(Object o) {<FILL_FUNCTION_BODY>}
@Override
public int hashCode() {
return Objects.hash(inflected, tokenIndex, Arrays.hashCode(lowerCaseValues));
}
/**
* @return all indices inside sentence's non-blank tokens where this token could possibly match
*/
List<Integer> getPossibleIndices(AnalyzedSentence sentence) {
boolean needMerge = false;
List<Integer> result = null;
for (String hint : lowerCaseValues) {
List<Integer> hintIndices = getHintIndices(sentence, hint);
if (hintIndices != null) {
if (result == null) {
result = hintIndices;
} else {
if (!needMerge) {
result = new ArrayList<>(result);
needMerge = true;
}
result.addAll(hintIndices);
}
}
}
if (result == null) return Collections.emptyList();
return needMerge ? new ArrayList<>(new TreeSet<>(result)) : result;
}
private boolean canBeIgnoredFor(AnalyzedSentence sentence) {
for (String hint : lowerCaseValues) {
if (getHintIndices(sentence, hint) != null) {
return false;
}
}
return true;
}
@Nullable
private List<Integer> getHintIndices(AnalyzedSentence sentence, String hint) {
return inflected ? sentence.getLemmaOffsets(hint) : sentence.getTokenOffsets(hint);
}
}
|
if (this == o) return true;
if (!(o instanceof TokenHint)) return false;
TokenHint tokenHint = (TokenHint) o;
return inflected == tokenHint.inflected &&
tokenIndex == tokenHint.tokenIndex &&
Arrays.equals(lowerCaseValues, tokenHint.lowerCaseValues);
| 535
| 92
| 627
|
<methods>public void <init>(java.lang.String, java.lang.String, org.languagetool.Language, List<org.languagetool.rules.patterns.PatternToken>, boolean, java.lang.String) ,public void <init>(java.lang.String, java.lang.String, org.languagetool.Language, List<org.languagetool.rules.patterns.PatternToken>, boolean) ,public final void addSuggestionMatch(org.languagetool.rules.patterns.Match) ,public final void addSuggestionMatchOutMsg(org.languagetool.rules.patterns.Match) ,public final List<org.languagetool.tagging.disambiguation.rules.DisambiguationPatternRule> getAntiPatterns() ,public java.lang.String getDescription() ,public final int getEndPositionCorrection() ,public org.languagetool.rules.patterns.RuleFilter getFilter() ,public java.lang.String getFilterArguments() ,public java.lang.String getFullId() ,public java.lang.String getId() ,public final org.languagetool.Language getLanguage() ,public final java.lang.String getMessage() ,public org.languagetool.rules.patterns.PatternRuleId getPatternRuleId() ,public List<org.languagetool.rules.patterns.PatternToken> getPatternTokens() ,public java.lang.String getSourceFile() ,public final int getStartPositionCorrection() ,public final java.lang.String getSubId() ,public final java.lang.String getSuggestionsOutMsg() ,public org.languagetool.rules.RuleMatch.Type getType() ,public int getXmlLineNumber() ,public boolean isAdjustSuggestionCase() ,public boolean isGetUnified() ,public boolean isGroupsOrUnification() ,public boolean isSentStart() ,public boolean isTestUnification() ,public org.languagetool.rules.RuleMatch[] match(org.languagetool.AnalyzedSentence) throws java.io.IOException,public void setAdjustSuggestionCase(boolean) ,public void setAntiPatterns(List<org.languagetool.tagging.disambiguation.rules.DisambiguationPatternRule>) ,public final void setEndPositionCorrection(int) ,public void setFilter(org.languagetool.rules.patterns.RuleFilter) ,public void setFilterArguments(java.lang.String) ,public final void 
setMessage(java.lang.String) ,public final void setStartPositionCorrection(int) ,public final void setSubId(java.lang.String) ,public void setType(org.languagetool.rules.RuleMatch.Type) ,public void setXmlLineNumber(int) ,public boolean supportsLanguage(org.languagetool.Language) ,public java.lang.String toString() <variables>private boolean adjustSuggestionCase,protected List<org.languagetool.tagging.disambiguation.rules.DisambiguationPatternRule> antiPatterns,private final non-sealed java.lang.String description,protected int endPositionCorrection,protected org.languagetool.rules.patterns.RuleFilter filter,protected java.lang.String filterArgs,private final non-sealed boolean getUnified,private final non-sealed boolean groupsOrUnification,private final non-sealed java.lang.String id,protected final non-sealed org.languagetool.Language language,private int lineNumber,protected java.lang.String message,protected final non-sealed List<org.languagetool.rules.patterns.PatternToken> patternTokens,protected final non-sealed boolean sentStart,protected java.lang.String sourceFile,protected int startPositionCorrection,protected java.lang.String subId,protected List<org.languagetool.rules.patterns.Match> suggestionMatches,protected List<org.languagetool.rules.patterns.Match> suggestionMatchesOutMsg,protected java.lang.String suggestionsOutMsg,protected final non-sealed boolean testUnification,protected org.languagetool.rules.RuleMatch.Type type
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/ApostropheTypeFilter.java
|
ApostropheTypeFilter
|
acceptRuleMatch
|
class ApostropheTypeFilter extends RuleFilter {
@Override
public RuleMatch acceptRuleMatch(RuleMatch match, Map<String, String> arguments, int patternTokenPos,
AnalyzedTokenReadings[] patternTokens, List<Integer> tokenPositions) throws IOException {<FILL_FUNCTION_BODY>}
}
|
String wordFrom = getRequired("wordFrom", arguments);
boolean hasTypographicalApostrophe = getRequired("hasTypographicalApostrophe", arguments).equalsIgnoreCase("true");
if (wordFrom != null) {
int posWord = 0;
if (wordFrom.equals("marker")) {
while (posWord < patternTokens.length && patternTokens[posWord].getStartPos() < match.getFromPos()) {
posWord++;
}
posWord++;
} else {
posWord = Integer.parseInt(wordFrom);
}
if (posWord < 1 || posWord > patternTokens.length) {
throw new IllegalArgumentException("ApostropheTypeFilter: Index out of bounds in "
+ match.getRule().getFullId() + ", wordFrom: " + posWord);
}
AnalyzedTokenReadings atrWord = patternTokens[posWord - 1];
if (hasTypographicalApostrophe == atrWord.hasTypographicApostrophe()) {
return match;
}
}
return null;
| 81
| 273
| 354
|
<methods>public non-sealed void <init>() ,public abstract org.languagetool.rules.RuleMatch acceptRuleMatch(org.languagetool.rules.RuleMatch, Map<java.lang.String,java.lang.String>, int, org.languagetool.AnalyzedTokenReadings[], List<java.lang.Integer>) throws java.io.IOException,public boolean matches(Map<java.lang.String,java.lang.String>, org.languagetool.AnalyzedTokenReadings[], int, List<java.lang.Integer>) throws java.io.IOException<variables>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/CaseConversionHelper.java
|
CaseConversionHelper
|
convertCase
|
class CaseConversionHelper {
private CaseConversionHelper() {
}
/**
* Converts case of the string token according to match element attributes.
* @param s Token to be converted.
* @param sample the sample string used to determine how the original string looks like (used only on case preservation)
* @return Converted string.
*/
public static String convertCase(Match.CaseConversion conversion, String s, String sample, Language lang) {<FILL_FUNCTION_BODY>}
}
|
if (StringTools.isEmpty(s)) {
return s;
}
String token = s;
switch (conversion) {
case NONE:
break;
case PRESERVE:
if (StringTools.startsWithUppercase(sample)) {
if (StringTools.isAllUppercase(sample)) {
token = token.toUpperCase(Locale.ENGLISH);
} else {
token = StringTools.uppercaseFirstChar(token, lang);
}
}
break;
case STARTLOWER:
token = token.substring(0, 1).toLowerCase() + token.substring(1);
break;
case STARTUPPER:
token = StringTools.uppercaseFirstChar(token, lang);
break;
case ALLUPPER:
token = token.toUpperCase(Locale.ENGLISH);
break;
case FIRSTUPPER:
token = token.toLowerCase();
token = StringTools.uppercaseFirstChar(token, lang);
break;
case ALLLOWER:
token = token.toLowerCase();
break;
case NOTASHKEEL:
token = StringTools.removeTashkeel(token);
break;
default:
break;
}
return token;
| 131
| 334
| 465
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/ConsistencyPatternRuleTransformer.java
|
ConsistencyPatternRule
|
match
|
class ConsistencyPatternRule extends TextLevelRule {
protected final Language ruleLanguage;
ConsistencyPatternRule(List<AbstractPatternRule> rules, Language lang) {
this.rules = Collections.unmodifiableList(rules);
this.ruleLanguage = lang;
setPremium(rules.stream().anyMatch(r -> r.isPremium()));
}
private final List<AbstractPatternRule> rules;
public List<AbstractPatternRule> getWrappedRules() {
return rules;
}
@Override
public String getId() {
return getMainRuleId(rules.get(0).getId());
}
@Override
public String getDescription() {
return rules.get(0).getDescription();
}
@Override
public RuleMatch[] match(List<AnalyzedSentence> sentences) throws IOException {<FILL_FUNCTION_BODY>}
@Override
public int minToCheckParagraph() {
// TODO: what should we use here? calculate based on min_prev_matches?
return 0;
}
@Override
public boolean supportsLanguage(Language language) {
return language.equalsConsiderVariantsIfSpecified(this.ruleLanguage);
}
}
|
Map<String, Integer> countFeatures = new HashMap<>();
int offsetChars = 0;
List<RuleMatch> matches = new ArrayList<>();
for (AnalyzedSentence s : sentences) {
List<RuleMatch> sentenceMatches = new ArrayList<>();
for (AbstractPatternRule rule : rules) {
RuleMatch[] ruleMatches = rule.match(s);
sentenceMatches.addAll(Arrays.asList(ruleMatches));
}
sentenceMatches = new SameRuleGroupFilter().filter(sentenceMatches);
// we need to adjust offsets since each pattern rule returns offsets relative to the sentence, not text
List<RuleMatch> adjustedSentenceMatches = new ArrayList<>();
for (RuleMatch rm : sentenceMatches) {
rm.setSentencePosition(rm.getFromPos(), rm.getToPos());
int fromPos = rm.getFromPos() + offsetChars;
int toPos = rm.getToPos() + offsetChars;
rm.setOffsetPosition(fromPos, toPos);
adjustedSentenceMatches.add(rm);
}
matches.addAll(adjustedSentenceMatches);
offsetChars += s.getText().length();
}
List<RuleMatch> resultMatches = new ArrayList<>();
// count occurrences of features
for (RuleMatch rm : matches) {
String feature = getFeature(rm.getRule().getId());
countFeatures.put(feature, countFeatures.getOrDefault(feature,0) + 1);
}
if (countFeatures.size()<2) {
// there is no inconsistency
return resultMatches.toArray(new RuleMatch[0]);
}
int max = Collections.max(countFeatures.values());
ArrayList<String> featuresWithMax = new ArrayList<>();
ArrayList<String> featuresToKeep = new ArrayList<>();
ArrayList<String> featuresToSuggest = new ArrayList<>();
for (Map.Entry<String, Integer> entry : countFeatures.entrySet()) {
if (entry.getValue()==max) {
featuresWithMax.add(entry.getKey());
} else {
featuresToKeep.add(entry.getKey());
}
}
featuresToSuggest.addAll(featuresWithMax);
if (featuresWithMax.size()>1) {
featuresToKeep.addAll(featuresWithMax);
}
for (RuleMatch rm : matches) {
if (featuresToKeep.contains(getFeature(rm.getRule().getId()))) {
resultMatches.add(ruleLanguage.adjustMatch(rm, featuresToSuggest));
}
}
return resultMatches.toArray(new RuleMatch[0]);
| 315
| 671
| 986
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/EquivalenceTypeLocator.java
|
EquivalenceTypeLocator
|
equals
|
// Immutable key object pairing a unification feature name with one of its
// concrete types; used to look up equivalence definitions for pattern rules.
// <FILL_FUNCTION_BODY> below is a dataset placeholder for the masked method body.
class EquivalenceTypeLocator {
private final String feature;  // unification feature name (e.g. "gender")
private final String type;     // concrete type within that feature (e.g. "feminine")
EquivalenceTypeLocator(String feature, String type) {
this.feature = feature;
this.type = type;
}
// Consistent with equals(): both derive only from (feature, type).
@Override
public int hashCode() {
return Objects.hash(feature, type);
}
// Value equality over (feature, type); body masked in this record.
@Override
public boolean equals(Object o) {<FILL_FUNCTION_BODY>}
}
|
if (this == o) { return true; }
if (o == null) { return false; }
if (getClass() != o.getClass()) {
return false;
}
EquivalenceTypeLocator other = (EquivalenceTypeLocator) o;
return Objects.equals(feature, other.feature)
&& Objects.equals(type, other.type);
| 117
| 100
| 217
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/FalseFriendRuleHandler.java
|
FalseFriendRuleHandler
|
characters
|
// SAX handler that parses the false-friend XML rule file and builds
// FalseFriendPatternRule objects for the (textLanguage, motherTongue) pair.
// <FILL_FUNCTION_BODY> below is a dataset placeholder for the masked method body.
class FalseFriendRuleHandler extends XMLRuleHandler {
// Definitions of values in XML files:
private static final String TRANSLATION = "translation";
private final ResourceBundle englishMessages;
private final ResourceBundle messages;
private final MessageFormat formatter;  // formats falseFriendHint with rule-specific arguments
private final Language textLanguage;
private final Language motherTongue;
private final Map<String, List<String>> suggestionMap = new HashMap<>(); // rule ID -> list of translations
private final List<String> suggestions = new ArrayList<>();
private final List<StringBuilder> translations = new ArrayList<>();
private final String falseFriendHint;
private boolean defaultOff;
private Language language;
private Language translationLanguage;
private Language currentTranslationLanguage;
private StringBuilder translation = new StringBuilder();
private boolean inTranslation;
FalseFriendRuleHandler(Language textLanguage, Language motherTongue, String falseFriendHint) {
englishMessages = ResourceBundleTools.getMessageBundle(Languages.getLanguageForShortCode("en-US"));
messages = ResourceBundleTools.getMessageBundle(motherTongue);
formatter = new MessageFormat("");
formatter.setLocale(motherTongue.getLocale());
this.textLanguage = textLanguage;
this.motherTongue = motherTongue;
this.falseFriendHint = falseFriendHint;
}
// Rule ID -> suggested translations, filled while parsing rule groups.
public Map<String, List<String>> getSuggestionMap() {
return suggestionMap;
}
// ===========================================================
// SAX DocumentHandler methods
// ===========================================================
// Tracks parser state on opening tags (rule, pattern, token, translation, ...).
@Override
public void startElement(String namespaceURI, String lName,
String qName, Attributes attrs) throws SAXException {
if (qName.equals(RULE)) {
translations.clear();
id = attrs.getValue("id");
if (!(inRuleGroup && defaultOff)) {
defaultOff = "off".equals(attrs.getValue("default"));
}
if (inRuleGroup && id == null) {
id = ruleGroupId;  // rules inside a group inherit the group's id
}
correctExamples = new ArrayList<>();
incorrectExamples = new ArrayList<>();
} else if (qName.equals(PATTERN)) {
inPattern = true;
String languageStr = attrs.getValue("lang");
if (Languages.isLanguageSupported(languageStr)) {
language = Languages.getLanguageForShortCode(languageStr);
}
} else if (qName.equals(TOKEN)) {
setToken(attrs);
} else if (qName.equals(TRANSLATION)) {
inTranslation = true;
String languageStr = attrs.getValue("lang");
if (Languages.isLanguageSupported(languageStr)) {
Language tmpLang = Languages.getLanguageForShortCode(languageStr);
currentTranslationLanguage = tmpLang;
if (tmpLang.equalsConsiderVariantsIfSpecified(motherTongue)) {
translationLanguage = tmpLang;
}
}
} else if (qName.equals(EXAMPLE)) {
correctExample = new StringBuilder();
incorrectExample = new StringBuilder();
if (attrs.getValue(TYPE).equals("incorrect")) {
inIncorrectExample = true;
} else if (attrs.getValue(TYPE).equals("correct")) {
inCorrectExample = true;
} else if (attrs.getValue(TYPE).equals("triggers_error")) {
throw new RuntimeException("'triggers_error' is not supported for false friend XML");
}
} else if (qName.equals(MESSAGE)) {
inMessage = true;
message = new StringBuilder();
} else if (qName.equals(RULEGROUP)) {
ruleGroupId = attrs.getValue("id");
inRuleGroup = true;
defaultOff = "off".equals(attrs.getValue(DEFAULT));
}
}
// Finalizes rules/translations/examples on closing tags.
@Override
public void endElement(String namespaceURI, String sName,
String qName) throws SAXException {
switch (qName) {
case RULE:
// Only build a rule when the pattern language matches the text language
// and at least one translation into the mother tongue was collected.
if (language.equalsConsiderVariantsIfSpecified(textLanguage) && translationLanguage != null
&& translationLanguage.equalsConsiderVariantsIfSpecified(motherTongue) && language != motherTongue
&& !translations.isEmpty()) {
formatter.applyPattern(falseFriendHint);
String tokensAsString = StringUtils.join(patternTokens, " ").replace('|', '/');
Object[] messageArguments = {tokensAsString,
englishMessages.getString(textLanguage.getShortCode()),
formatTranslations(translations),
englishMessages.getString(motherTongue.getShortCode())};
String description = formatter.format(messageArguments);
PatternRule rule = new FalseFriendPatternRule(id, language, patternTokens,
messages.getString("false_friend_desc") + " "
+ tokensAsString, description, messages.getString("false_friend"));
rule.setCorrectExamples(correctExamples);
rule.setIncorrectExamples(incorrectExamples);
rule.setCategory(Categories.FALSE_FRIENDS.getCategory(messages));
if (defaultOff) {
rule.setDefaultOff();
}
rules.add(rule);
}
if (patternTokens != null) {
patternTokens.clear();
}
break;
case TOKEN:
finalizeTokens(language.getUnifierConfiguration());
break;
case PATTERN:
inPattern = false;
break;
case TRANSLATION:
if (currentTranslationLanguage != null && currentTranslationLanguage.equalsConsiderVariantsIfSpecified(motherTongue)) {
// currentTranslationLanguage can be null if the language is not supported
translations.add(translation);
}
if (currentTranslationLanguage != null && currentTranslationLanguage.equalsConsiderVariantsIfSpecified(textLanguage)
&& language.equalsConsiderVariantsIfSpecified(motherTongue) && !suggestions.contains(translation.toString())) {
suggestions.add(translation.toString());
}
translation = new StringBuilder();
inTranslation = false;
currentTranslationLanguage = null;
break;
case EXAMPLE:
if (inCorrectExample) {
correctExamples.add(new CorrectExample(correctExample.toString()));
} else if (inIncorrectExample) {
incorrectExamples.add(new IncorrectExample(incorrectExample.toString()));
}
inCorrectExample = false;
inIncorrectExample = false;
correctExample = new StringBuilder();
incorrectExample = new StringBuilder();
break;
case MESSAGE:
inMessage = false;
break;
case RULEGROUP:
// Flush the collected suggestions under the rule-group id.
if (!suggestions.isEmpty()) {
List<String> l = new ArrayList<>(suggestions);
suggestionMap.put(id, l);
suggestions.clear();
}
inRuleGroup = false;
break;
}
}
// Renders translations as a quoted, comma-separated list for the message.
private String formatTranslations(List<StringBuilder> translations) {
return translations.stream().map(o -> "\"" + o + "\"").collect(Collectors.joining(", "));
}
// Routes character data to the buffer matching the current parser state;
// body masked in this record.
@Override
public void characters(char[] buf, int offset, int len) {<FILL_FUNCTION_BODY>}
}
|
String s = new String(buf, offset, len);
if (inToken && inPattern) {
elements.append(s);
} else if (inCorrectExample) {
correctExample.append(s);
} else if (inIncorrectExample) {
incorrectExample.append(s);
} else if (inTranslation) {
translation.append(s);
}
| 1,854
| 100
| 1,954
|
<methods>public void <init>() ,public void error(org.xml.sax.SAXParseException) throws org.xml.sax.SAXException,public List<org.languagetool.rules.patterns.AbstractPatternRule> getRules() ,public void setDocumentLocator(org.xml.sax.Locator) ,public void warning(org.xml.sax.SAXParseException) throws org.xml.sax.SAXException<variables>protected static final java.lang.String AND,protected static final java.lang.String ANTIPATTERN,protected static final java.lang.String CASE_SENSITIVE,protected static final java.lang.String CHUNKTAG,protected static final java.lang.String CHUNKTAG_REGEXP,protected static final java.lang.String DEFAULT,protected static final java.lang.String DISTANCETOKENS,protected static final java.lang.String EXAMPLE,protected static final java.lang.String EXCEPTION,protected static final java.lang.String FALSE,protected static final java.lang.String FEATURE,protected static final java.lang.String GOAL_SPECIFIC,public static final java.lang.String ID,protected static final java.lang.String IGNORE,protected static final java.lang.String INFLECTED,protected static final java.lang.String MARK,protected static final java.lang.String MARKER,protected static final java.lang.String MATCH,protected static final java.lang.String MAX,protected static final java.lang.String MESSAGE,protected static final java.lang.String MIN,protected static final java.lang.String MINPREVMATCHES,public static final java.lang.String NAME,protected static final java.lang.String NEGATE,protected static final java.lang.String NEGATE_POS,protected static final java.lang.String NO,protected static final java.lang.String OFF,protected static final java.lang.String ON,protected static final java.lang.String OR,protected static final java.lang.String PATTERN,protected static final java.lang.String PHRASES,protected static final java.lang.String POSTAG,protected static final java.lang.String POSTAG_REGEXP,protected static final java.lang.String PREMIUM,protected static final 
java.lang.String PRIO,protected static final java.lang.String REGEXP,protected static final java.lang.String RULE,protected static final java.lang.String RULEGROUP,protected static final java.lang.String RULES,protected static final java.lang.String SCOPE,protected static final java.lang.String SKIP,protected static final java.lang.String SPACEBEFORE,protected static final java.lang.String SUGGESTION,protected static final java.lang.String TABNAME,protected static final java.lang.String TEMP_OFF,protected static final java.lang.String TOKEN,protected static final java.lang.String TRUE,protected static final java.lang.String TYPE,protected static final java.lang.String UNIFICATION,protected static final java.lang.String UNIFY,protected static final java.lang.String UNIFY_IGNORE,protected static final java.lang.String YES,protected int andGroupCounter,protected java.lang.StringBuilder antiPatternExample,protected java.lang.StringBuilder antiPatternForRuleGroupExample,protected List<org.languagetool.rules.CorrectExample> antipatternExamples,protected List<org.languagetool.rules.CorrectExample> antipatternForRuleGroupsExamples,protected boolean caseSensitive,protected List<java.lang.String> categoryTags,protected List<java.lang.String> categoryToneTags,protected org.languagetool.chunking.ChunkTag chunkTag,protected java.lang.StringBuilder correctExample,protected List<org.languagetool.rules.CorrectExample> correctExamples,protected java.lang.StringBuilder elements,protected int endPositionCorrection,protected Map<java.lang.String,List<java.lang.String>> equivalenceFeatures,protected java.lang.StringBuilder errorTriggerExample,protected List<org.languagetool.rules.ErrorTriggeringExample> errorTriggeringExamples,protected java.lang.StringBuilder exampleCorrection,protected java.lang.Boolean exceptionLevelCaseSensitive,protected boolean exceptionLevelCaseSet,protected boolean exceptionPosNegation,protected boolean exceptionPosRegExp,protected java.lang.String 
exceptionPosToken,protected boolean exceptionSet,protected boolean exceptionSpaceBefore,protected boolean exceptionSpaceBeforeSet,protected boolean exceptionStringInflected,protected boolean exceptionStringNegation,protected boolean exceptionStringRegExp,protected boolean exceptionValidNext,protected boolean exceptionValidPrev,protected java.lang.StringBuilder exceptions,protected java.lang.String id,protected boolean inAndGroup,protected boolean inAntiPatternExample,protected boolean inAntiPatternForRuleGroupExample,protected boolean inCorrectExample,protected boolean inErrorTriggerExample,protected boolean inException,protected boolean inIncorrectExample,protected boolean inMarker,protected boolean inMatch,protected boolean inMessage,protected boolean inOrGroup,protected boolean inPattern,protected boolean inPhrases,protected boolean inRegex,protected boolean inRuleGroup,protected boolean inShortMessage,protected boolean inShortMessageForRuleGroup,protected boolean inSuggestion,protected boolean inToken,protected boolean inUnification,protected boolean inUnificationDef,protected boolean inUnificationNeutral,protected boolean inUrl,protected boolean inUrlForRuleGroup,protected java.lang.StringBuilder incorrectExample,protected List<org.languagetool.rules.IncorrectExample> incorrectExamples,private final Map<Triple<java.lang.String,java.lang.Boolean,java.lang.Boolean>,org.languagetool.rules.patterns.StringMatcher> internedMatchers,private final Map<Triple<java.lang.String,java.lang.Boolean,java.lang.Boolean>,org.languagetool.rules.patterns.PatternToken.PosToken> internedPos,private final Map<java.lang.String,java.lang.String> internedStrings,protected boolean isGoalSpecific,protected java.lang.String isGoalSpecificCategoryAttribute,protected java.lang.String isGoalSpecificRuleGroupAttribute,protected boolean isPremiumRule,protected org.languagetool.Language language,protected boolean lastPhrase,protected java.lang.StringBuilder match,protected int 
maxOccurrence,protected java.lang.StringBuilder message,protected java.util.ResourceBundle messages,protected int minOccurrence,protected int orGroupCounter,protected org.xml.sax.Locator pLocator,protected org.languagetool.rules.patterns.PatternToken patternToken,protected List<org.languagetool.rules.patterns.PatternToken> patternTokens,protected java.lang.String phraseId,protected java.lang.String phraseIdRef,protected Map<java.lang.String,List<List<org.languagetool.rules.patterns.PatternToken>>> phraseMap,protected List<ArrayList<org.languagetool.rules.patterns.PatternToken>> phrasePatternTokens,protected boolean posNegation,protected boolean posRegExp,protected java.lang.String posToken,protected java.lang.String premiumCategoryAttribute,protected java.lang.String premiumFileAttribute,protected java.lang.String premiumRuleGroupAttribute,protected int prioCategoryAttribute,protected int prioRuleAttribute,protected int prioRuleGroupAttribute,protected boolean regExpression,protected java.lang.StringBuilder regex,protected boolean regexCaseSensitive,protected org.languagetool.rules.patterns.XMLRuleHandler.RegexpMode regexMode,protected int regexpMark,protected java.lang.String ruleGroupId,protected List<java.lang.String> ruleGroupTags,protected List<java.lang.String> ruleGroupToneTags,protected List<java.lang.String> ruleTags,protected List<java.lang.String> ruleToneTags,protected List<org.languagetool.rules.patterns.AbstractPatternRule> rules,protected java.lang.StringBuilder shortMessage,protected java.lang.StringBuilder shortMessageForRuleGroup,protected int skipPos,protected int startPositionCorrection,protected List<org.languagetool.rules.patterns.Match> suggestionMatches,protected List<org.languagetool.rules.patterns.Match> suggestionMatchesOutMsg,protected java.lang.StringBuilder suggestionsOutMsg,protected int tokenCounter,protected boolean tokenInflected,protected boolean tokenLevelCaseSensitive,protected boolean tokenLevelCaseSet,protected boolean 
tokenNegated,protected org.languagetool.rules.patterns.Match tokenReference,protected boolean tokenSpaceBefore,protected boolean tokenSpaceBeforeSet,protected java.lang.String uFeature,protected java.lang.String uType,protected List<java.lang.String> uTypeList,protected boolean uniNegation,protected java.lang.StringBuilder url,protected java.lang.StringBuilder urlForRuleGroup,protected int xmlLineNumber,protected int xmlLineNumberAntiPattern
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/FalseFriendRuleLoader.java
|
FalseFriendRuleLoader
|
getRules
|
// Loads false-friend rules from XML, delegating the SAX parsing to
// FalseFriendRuleHandler and attaching localized suggestion messages.
// <FILL_FUNCTION_BODY> below is a dataset placeholder for the masked method body.
class FalseFriendRuleLoader extends DefaultHandler {
private final String falseFriendHint;  // localized message template for false friends
private final String falseFriendSugg;  // localized template for the suggestion part
public FalseFriendRuleLoader(Language motherTongue) {
ResourceBundle messages = JLanguageTool.getDataBroker().getResourceBundle(JLanguageTool.MESSAGE_BUNDLE, motherTongue.getLocale());
this.falseFriendHint = messages.getString("false_friend_hint");
this.falseFriendSugg = messages.getString("false_friend_suggestion");
}
public FalseFriendRuleLoader(String falseFriendHint, String falseFriendSugg) {
this.falseFriendHint = Objects.requireNonNull(falseFriendHint);
this.falseFriendSugg = Objects.requireNonNull(falseFriendSugg);
}
/**
 * @param file XML file with false friend rules
 * @since 2.3
 */
public final List<AbstractPatternRule> getRules(File file, Language language, Language motherTongue) throws IOException {
try (InputStream inputStream = new FileInputStream(file)) {
return getRules(inputStream, language, motherTongue);
} catch (ParserConfigurationException | SAXException e) {
// Wrap parser failures so callers only need to handle IOException.
throw new IOException("Could not load false friend rules from " + file, e);
}
}
// Parses the stream and returns the rules; body masked in this record.
public final List<AbstractPatternRule> getRules(InputStream stream,
Language textLanguage, Language motherTongue)
throws ParserConfigurationException, SAXException, IOException {<FILL_FUNCTION_BODY>}
}
|
FalseFriendRuleHandler handler = new FalseFriendRuleHandler(
textLanguage, motherTongue, falseFriendHint);
SAXParserFactory factory = SAXParserFactory.newInstance();
SAXParser saxParser = factory.newSAXParser();
saxParser.getXMLReader().setFeature(
"http://apache.org/xml/features/nonvalidating/load-external-dtd",
false);
saxParser.parse(stream, handler);
List<AbstractPatternRule> rules = handler.getRules();
List<AbstractPatternRule> filteredRules = new ArrayList<>();
// Add suggestions to each rule:
MessageFormat msgFormat = new MessageFormat(falseFriendSugg);
ShortDescriptionProvider descProvider = new ShortDescriptionProvider();
for (AbstractPatternRule rule : rules) {
String patternStr = rule.getPatternTokens().stream().map(k -> k.getString()).collect(Collectors.joining(" "));
List<String> suggestions = handler.getSuggestionMap().get(rule.getId());
if (suggestions != null) {
List<String> formattedSuggestions = new ArrayList<>();
for (String suggestion : suggestions) {
if (patternStr.equalsIgnoreCase(suggestion)) {
continue;
}
String desc = descProvider.getShortDescription(suggestion, textLanguage);
if (desc != null) {
formattedSuggestions.add("<suggestion>" + suggestion + "</suggestion> (" + desc + ")");
} else {
formattedSuggestions.add("<suggestion>" + suggestion + "</suggestion>");
}
}
if (formattedSuggestions.size() > 0) {
String joined = String.join(", ", formattedSuggestions);
rule.setMessage(rule.getMessage() + " " + msgFormat.format(new String[]{joined}));
filteredRules.add(rule);
}
}
}
return filteredRules;
| 385
| 490
| 875
|
<methods>public void <init>() ,public void characters(char[], int, int) throws org.xml.sax.SAXException,public void endDocument() throws org.xml.sax.SAXException,public void endElement(java.lang.String, java.lang.String, java.lang.String) throws org.xml.sax.SAXException,public void endPrefixMapping(java.lang.String) throws org.xml.sax.SAXException,public void error(org.xml.sax.SAXParseException) throws org.xml.sax.SAXException,public void fatalError(org.xml.sax.SAXParseException) throws org.xml.sax.SAXException,public void ignorableWhitespace(char[], int, int) throws org.xml.sax.SAXException,public void notationDecl(java.lang.String, java.lang.String, java.lang.String) throws org.xml.sax.SAXException,public void processingInstruction(java.lang.String, java.lang.String) throws org.xml.sax.SAXException,public org.xml.sax.InputSource resolveEntity(java.lang.String, java.lang.String) throws java.io.IOException, org.xml.sax.SAXException,public void setDocumentLocator(org.xml.sax.Locator) ,public void skippedEntity(java.lang.String) throws org.xml.sax.SAXException,public void startDocument() throws org.xml.sax.SAXException,public void startElement(java.lang.String, java.lang.String, java.lang.String, org.xml.sax.Attributes) throws org.xml.sax.SAXException,public void startPrefixMapping(java.lang.String, java.lang.String) throws org.xml.sax.SAXException,public void unparsedEntityDecl(java.lang.String, java.lang.String, java.lang.String, java.lang.String) throws org.xml.sax.SAXException,public void warning(org.xml.sax.SAXParseException) throws org.xml.sax.SAXException<variables>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/IgnoreWhitespaceFilter.java
|
IgnoreWhitespaceFilter
|
acceptNode
|
// DOM LS parser filter intended to drop whitespace-only text nodes so the
// parsed tree can later be re-serialized with clean indentation.
// <FILL_FUNCTION_BODY> below is a dataset placeholder for the masked method body.
class IgnoreWhitespaceFilter implements LSParserFilter {
// Rejects nodes whose text content is blank; body masked in this record.
@Override
public short acceptNode(Node nodeArg) {<FILL_FUNCTION_BODY>}
@Override
public short startElement(Element elementArg) {
// Elements are never filtered out, only (whitespace) nodes in acceptNode().
return LSParserFilter.FILTER_ACCEPT;
}
// NOTE(review): NOTATION_NODE looks odd for a whitespace filter — confirm
// which node types the LS parser actually passes to acceptNode() here.
@Override
public int getWhatToShow() {
return Node.NOTATION_NODE;
}
}
|
String textContent = nodeArg.getTextContent();
if (textContent.trim().isEmpty()) {
return LSParserFilter.FILTER_REJECT;
} else {
return LSParserFilter.FILTER_ACCEPT;
}
| 109
| 65
| 174
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/Match.java
|
Match
|
createState
|
// Describes a <match> element of a pattern rule: how a matched token is
// transformed (case conversion, regex replace, POS change) when building a
// suggestion or message. A MatchState is created per actual token match.
// <FILL_FUNCTION_BODY> below is a dataset placeholder for the masked method body.
class Match {
/** Possible string case conversions. **/
public enum CaseConversion {
NONE, STARTLOWER, STARTUPPER, ALLLOWER, ALLUPPER, PRESERVE, FIRSTUPPER, NOTASHKEEL
}
/** Which skipped tokens to include in the match output. */
public enum IncludeRange {
NONE, FOLLOWING, ALL
}
private final String posTag;
private final boolean suppressMisspelled;  // if true, misspelled results are suppressed
private final String regexReplace;
private final String posTagReplace;
private final CaseConversion caseConversionType;
private final IncludeRange includeSkipped;
// Pattern used to define parts of the matched token:
private final Pattern pRegexMatch;
// True if this match element is used for formatting POS token:
private final boolean setPos;
private boolean postagRegexp;
// True if this match element formats a statically defined lemma which is
// enclosed by the element, e.g., <match...>word</match>:
private boolean staticLemma;
private String lemma;
private int tokenRef;
// Pattern used to define parts of the matched POS token:
private Pattern pPosRegexMatch;
// True when the match is not in the suggestion:
private boolean inMessageOnly;
public Match(String posTag, String posTagReplace,
boolean postagRegexp, String regexMatch,
String regexReplace, CaseConversion caseConversionType,
boolean setPOS,
boolean suppressMisspelled,
IncludeRange includeSkipped) {
this.posTag = posTag;
this.postagRegexp = postagRegexp;
this.caseConversionType = caseConversionType;
pRegexMatch = regexMatch != null ? Pattern.compile(regexMatch) : null;
if (postagRegexp && posTag != null) {
pPosRegexMatch = Pattern.compile(posTag);
}
this.regexReplace = regexReplace;
this.posTagReplace = posTagReplace;
this.setPos = setPOS;
this.includeSkipped = includeSkipped;
this.suppressMisspelled = suppressMisspelled;
}
/**
 * Creates a state used for actually matching a token.
 * Body masked in this record.
 * @since 2.3
 */
public MatchState createState(Synthesizer synthesizer, AnalyzedTokenReadings token) {<FILL_FUNCTION_BODY>}
/**
 * Creates a state used for actually matching a token.
 * @since 2.3
 */
public MatchState createState(Synthesizer synthesizer, AnalyzedTokenReadings[] tokens, int index, int next) {
MatchState state = new MatchState(this, synthesizer);
state.setToken(tokens, index, next);
return state;
}
/**
 * Checks if the Match element is used for setting the part of speech: {@code setpos="yes"} in XML.
 * @return True if Match sets POS.
 */
public boolean setsPos() {
return setPos;
}
/**
 * Checks if the Match element uses regexp-based form of the POS tag.
 * @return True if regexp is used in POS.
 */
public boolean posRegExp() {
return postagRegexp;
}
/**
 * Sets a base form (lemma) that will be formatted, or synthesized, using the
 * specified POS regular expressions.
 * @param lemmaString String that specifies the base form.
 */
public void setLemmaString(String lemmaString) {
if (!StringTools.isEmpty(lemmaString)) {
lemma = lemmaString;
staticLemma = true;
// A static lemma always implies regexp-based POS handling.
postagRegexp = true;
if (posTag != null) {
pPosRegexMatch = Pattern.compile(posTag);
}
}
}
/** @since 2.3 */
public String getLemma() {
return lemma;
}
/** @since 2.3 */
public boolean isStaticLemma() {
return staticLemma;
}
/**
 * Used to tell whether the Match class will spell-check the result so
 * that misspelled suggestions are suppressed.
 * @return True if this is so.
 */
public boolean checksSpelling() {
return suppressMisspelled;
}
/**
 * Sets the token number referenced by the match.
 * @param i Token number.
 */
public void setTokenRef(int i) {
tokenRef = i;
}
/**
 * Gets the token number referenced by the match.
 * @return token number.
 */
public int getTokenRef() {
return tokenRef;
}
/**
 * Used to let LT know that it should change the case of the match.
 * @return true if match converts the case of the token.
 */
public boolean convertsCase() {
return caseConversionType != CaseConversion.NONE;
}
/** @since 2.3 */
public CaseConversion getCaseConversionType() {
return caseConversionType;
}
public void setInMessageOnly(boolean inMessageOnly) {
this.inMessageOnly = inMessageOnly;
}
public boolean isInMessageOnly() {
return inMessageOnly;
}
/** @since 2.3 */
public String getPosTag() {
return posTag;
}
/** @since 2.3 */
public Pattern getRegexMatch() {
return pRegexMatch;
}
/** @since 2.3 */
public String getRegexReplace() {
return regexReplace;
}
/** @since 2.3 */
public Pattern getPosRegexMatch() {
return pPosRegexMatch;
}
/** @since 2.3 */
public boolean isPostagRegexp() {
return postagRegexp;
}
/** @since 2.3 */
public String getPosTagReplace() {
return posTagReplace;
}
/** @since 2.3 */
public IncludeRange getIncludeSkipped() {
return includeSkipped;
}
}
|
MatchState state = new MatchState(this, synthesizer);
state.setToken(token);
return state;
| 1,584
| 33
| 1,617
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/PasswordAuthenticator.java
|
PasswordAuthenticator
|
getPasswordAuthentication
|
// Authenticator that supplies credentials embedded in the requesting URL's
// userInfo part (http://user:password@host/...), if present.
// <FILL_FUNCTION_BODY> below is a dataset placeholder for the masked method body.
class PasswordAuthenticator extends Authenticator {
// Returns parsed credentials, or null when the URL carries none;
// body masked in this record.
@Override
@Nullable
protected PasswordAuthentication getPasswordAuthentication() {<FILL_FUNCTION_BODY>}
}
|
if (getRequestingURL() == null) {
return null;
}
String userInfo = getRequestingURL().getUserInfo();
if (StringTools.isEmpty(userInfo)) {
return null;
}
String[] parts = userInfo.split(":");
if (parts.length != 2) {
throw new RuntimeException("Invalid userInfo format, expected 'user:password': " + userInfo);
}
String username = parts[0];
String password = parts[1];
return new PasswordAuthentication(username, password.toCharArray());
| 47
| 145
| 192
|
<methods>public void <init>() ,public static java.net.Authenticator getDefault() ,public static java.net.PasswordAuthentication requestPasswordAuthentication(java.net.InetAddress, int, java.lang.String, java.lang.String, java.lang.String) ,public static java.net.PasswordAuthentication requestPasswordAuthentication(java.lang.String, java.net.InetAddress, int, java.lang.String, java.lang.String, java.lang.String) ,public static java.net.PasswordAuthentication requestPasswordAuthentication(java.lang.String, java.net.InetAddress, int, java.lang.String, java.lang.String, java.lang.String, java.net.URL, java.net.Authenticator.RequestorType) ,public static java.net.PasswordAuthentication requestPasswordAuthentication(java.net.Authenticator, java.lang.String, java.net.InetAddress, int, java.lang.String, java.lang.String, java.lang.String, java.net.URL, java.net.Authenticator.RequestorType) ,public java.net.PasswordAuthentication requestPasswordAuthenticationInstance(java.lang.String, java.net.InetAddress, int, java.lang.String, java.lang.String, java.lang.String, java.net.URL, java.net.Authenticator.RequestorType) ,public static synchronized void setDefault(java.net.Authenticator) <variables>private final java.lang.String key,private java.net.Authenticator.RequestorType requestingAuthType,private java.lang.String requestingHost,private int requestingPort,private java.lang.String requestingPrompt,private java.lang.String requestingProtocol,private java.lang.String requestingScheme,private java.net.InetAddress requestingSite,private java.net.URL requestingURL,private static volatile java.net.Authenticator theAuthenticator
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/PatternRuleId.java
|
PatternRuleId
|
toString
|
// Immutable identifier for a pattern rule, optionally with a rule-group
// sub id (1-based). Used to address a specific rule for e.g. XML export.
// <FILL_FUNCTION_BODY> below is a dataset placeholder for the masked method body.
class PatternRuleId {
private final String id;
private final String subId;  // null when the id refers to a plain rule, not a group member
/**
 * @param id the rule id
 */
public PatternRuleId(String id) {
Validate.notEmpty(id, "id must be set");
this.id = id;
this.subId = null;
}
/**
 * @param id the rule id
 * @param subId the sub id of a rulegroup, starting at {@code 1}
 */
public PatternRuleId(String id, String subId) {
Validate.notEmpty(id, "id must be set");
Validate.notEmpty(subId, "subId must be set, if specified");
this.id = id;
this.subId = subId;
}
public String getId() {
return id;
}
/**
 * @return a sub id or {@code null} if no sub id has been set
 */
@Nullable
public String getSubId() {
return subId;
}
// String form of the id (with sub id when present); body masked in this record.
@Override
public String toString() {<FILL_FUNCTION_BODY>}
}
|
if (subId != null) {
return id + "[" + subId + "]";
} else {
return id;
}
| 293
| 41
| 334
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/PatternRuleLoader.java
|
PatternRuleLoader
|
getRules
|
// Loads pattern rules from a grammar XML file or stream via SAX parsing,
// delegating the per-element work to PatternRuleHandler.
// <FILL_FUNCTION_BODY> below is a dataset placeholder for the masked method body.
class PatternRuleLoader extends DefaultHandler {
private boolean relaxedMode = false;  // when true, missing id/name is tolerated
/**
 * @param file XML file with pattern rules
 */
public final List<AbstractPatternRule> getRules(File file, Language lang) throws IOException {<FILL_FUNCTION_BODY>}
/**
 * If set to true, don't throw an exception if id or name is not set.
 * Used for online rule editor.
 * @since 2.1
 */
public void setRelaxedMode(boolean relaxedMode) {
this.relaxedMode = relaxedMode;
}
/**
 * @param is stream with the XML rules
 * @param filename used only for verbose exception message - should refer to where the stream comes from
 */
public final List<AbstractPatternRule> getRules(InputStream is, String filename, Language lang) throws IOException {
try {
PatternRuleHandler handler = new PatternRuleHandler(filename, lang);
handler.setRelaxedMode(relaxedMode);
SAXParserFactory factory = SAXParserFactory.newInstance();
SAXParser saxParser = factory.newSAXParser();
if (JLanguageTool.isCustomPasswordAuthenticatorUsed()) {
Tools.setPasswordAuthenticator();
}
// Don't fetch the external DTD; rules must parse offline.
saxParser.getXMLReader().setFeature("http://apache.org/xml/features/nonvalidating/load-external-dtd", false);
saxParser.parse(is, handler);
return handler.getRules();
} catch (Exception e) {
// Wrap all parse/config failures so callers only handle IOException.
throw new IOException("Cannot load or parse input stream of '" + filename + "'", e);
}
}
}
|
try (InputStream inputStream = Files.newInputStream(file.toPath())) {
PatternRuleLoader ruleLoader = new PatternRuleLoader();
return ruleLoader.getRules(inputStream, file.getAbsolutePath(), lang);
}
| 411
| 62
| 473
|
<methods>public void <init>() ,public void characters(char[], int, int) throws org.xml.sax.SAXException,public void endDocument() throws org.xml.sax.SAXException,public void endElement(java.lang.String, java.lang.String, java.lang.String) throws org.xml.sax.SAXException,public void endPrefixMapping(java.lang.String) throws org.xml.sax.SAXException,public void error(org.xml.sax.SAXParseException) throws org.xml.sax.SAXException,public void fatalError(org.xml.sax.SAXParseException) throws org.xml.sax.SAXException,public void ignorableWhitespace(char[], int, int) throws org.xml.sax.SAXException,public void notationDecl(java.lang.String, java.lang.String, java.lang.String) throws org.xml.sax.SAXException,public void processingInstruction(java.lang.String, java.lang.String) throws org.xml.sax.SAXException,public org.xml.sax.InputSource resolveEntity(java.lang.String, java.lang.String) throws java.io.IOException, org.xml.sax.SAXException,public void setDocumentLocator(org.xml.sax.Locator) ,public void skippedEntity(java.lang.String) throws org.xml.sax.SAXException,public void startDocument() throws org.xml.sax.SAXException,public void startElement(java.lang.String, java.lang.String, java.lang.String, org.xml.sax.Attributes) throws org.xml.sax.SAXException,public void startPrefixMapping(java.lang.String, java.lang.String) throws org.xml.sax.SAXException,public void unparsedEntityDecl(java.lang.String, java.lang.String, java.lang.String, java.lang.String) throws org.xml.sax.SAXException,public void warning(org.xml.sax.SAXParseException) throws org.xml.sax.SAXException<variables>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/PatternRuleXmlCreator.java
|
PatternRuleXmlCreator
|
toXML
|
class PatternRuleXmlCreator {
/**
* Return the given pattern rule as an indented XML string.
* @since 2.3
*/
public final String toXML(PatternRuleId ruleId, Language language) {<FILL_FUNCTION_BODY>}
private Document getDocument(InputStream is) throws InstantiationException, IllegalAccessException, ClassNotFoundException {
DOMImplementationRegistry registry = DOMImplementationRegistry.newInstance();
DOMImplementationLS impl = (DOMImplementationLS)registry.getDOMImplementation("LS");
LSParser parser = impl.createLSParser(DOMImplementationLS.MODE_SYNCHRONOUS, null);
// we need to ignore whitespace here so the nodeToString() method will be able to indent it properly:
parser.setFilter(new IgnoreWhitespaceFilter());
LSInput domInput = impl.createLSInput();
domInput.setByteStream(is);
return parser.parse(domInput);
}
private String nodeToString(Node node) {
StringWriter sw = new StringWriter();
try {
Transformer t = TransformerFactory.newInstance().newTransformer();
t.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes");
t.transform(new DOMSource(node), new StreamResult(sw));
} catch (TransformerException e) {
throw new RuntimeException(e);
}
// We have to use our own simple indentation. as the Java transformer indentation
// introduces whitespace e.g. in the <suggestion> elements, breaking rules:
String xml = sw.toString()
.replace("<token", "\n <token")
.replace("<and", "\n <and")
.replace("</and>", "\n </and>")
.replace("<phraseref", "\n <phraseref")
.replace("<antipattern", "\n <antipattern")
.replace("<pattern", "\n <pattern")
.replace("</pattern", "\n </pattern")
.replace("</antipattern", "\n </antipattern")
.replace("</rule>", "\n</rule>")
.replace("<filter", "\n <filter")
.replace("<message", "\n <message")
.replace("<short", "\n <short")
.replace("<url", "\n <url")
.replace("<example", "\n <example")
.replace("</suggestion><suggestion>", "</suggestion>\n <suggestion>")
.replace("</message><suggestion>", "</message>\n <suggestion>");
return xml;
}
}
|
List<String> filenames = language.getRuleFileNames();
XPath xpath = XPathFactory.newInstance().newXPath();
for (String filename : filenames) {
try (InputStream is = JLanguageTool.getDataBroker().getAsStream(filename)) {
Document doc = getDocument(is);
Node ruleNode = (Node) xpath.evaluate("/rules/category/rule[@id='" + ruleId.getId() + "']", doc, XPathConstants.NODE);
if (ruleNode != null) {
return nodeToString(ruleNode);
}
Node ruleNodeInGroup = (Node) xpath.evaluate("/rules/category/rulegroup/rule[@id='" + ruleId.getId() + "']", doc, XPathConstants.NODE);
if (ruleNodeInGroup != null) {
return nodeToString(ruleNodeInGroup);
}
if (ruleId.getSubId() != null) {
NodeList ruleGroupNodes = (NodeList) xpath.evaluate("/rules/category/rulegroup[@id='" + ruleId.getId() + "']/rule", doc, XPathConstants.NODESET);
if (ruleGroupNodes != null) {
for (int i = 0; i < ruleGroupNodes.getLength(); i++) {
if (Integer.toString(i+1).equals(ruleId.getSubId())) {
return nodeToString(ruleGroupNodes.item(i));
}
}
}
} else {
Node ruleGroupNode = (Node) xpath.evaluate("/rules/category/rulegroup[@id='" + ruleId.getId() + "']", doc, XPathConstants.NODE);
if (ruleGroupNode != null) {
return nodeToString(ruleGroupNode);
}
}
} catch (Exception e) {
throw new RuntimeException("Could not turn rule '" + ruleId + "' for language " + language + " into a string", e);
}
}
throw new RuntimeException("Could not find rule '" + ruleId + "' for language " + language + " in files: " + filenames);
| 674
| 528
| 1,202
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/PatternTokenBuilder.java
|
PatternTokenBuilder
|
build
|
class PatternTokenBuilder {
private String token;
private String posTag;
private boolean marker = true;
private boolean matchInflectedForms = false;
private boolean caseSensitive;
private boolean regexp;
private boolean negation;
private boolean isWhiteSpaceSet = false;
private boolean isWhiteSpaceBefore;
private int minOccurrence = 1;
private int maxOccurrence = 1;
private int skip;
private String tokenException;
/**
* Add a case-insensitive token.
*/
public PatternTokenBuilder token(String token) {
this.token = Objects.requireNonNull(token);
return this;
}
/**
* Add a case-sensitive token.
* @since 3.3
*/
public PatternTokenBuilder csToken(String token) {
this.token = Objects.requireNonNull(token);
caseSensitive = true;
return this;
}
public PatternTokenBuilder tokenRegex(String token) {
this.token = Objects.requireNonNull(token);
regexp = true;
return this;
}
public PatternTokenBuilder csTokenRegex(String token) {
this.token = Objects.requireNonNull(token);
regexp = true;
caseSensitive = true;
return this;
}
public PatternTokenBuilder pos(String posTag) {
return pos(posTag, false);
}
public PatternTokenBuilder posRegex(String posTag) {
return pos(posTag, true);
}
/** @since 4.9 */
public PatternTokenBuilder min(int val) {
if (val < 0) {
throw new IllegalArgumentException("minOccurrence must be >= 0: " + minOccurrence);
}
minOccurrence = val;
return this;
}
/** @since 4.9 */
public PatternTokenBuilder max(int val) {
maxOccurrence = val;
return this;
}
/**
* Corresponds to {@code <marker>...</marker>} in XML. Note that there
* can be more tokens with a mark, but then must all be adjacent.
* @since 4.6
*/
public PatternTokenBuilder mark(boolean isMarked) {
this.marker = isMarked;
return this;
}
public PatternTokenBuilder posRegexWithStringException(String posTag, String tokenExceptionRegex) {
return pos(posTag, true, tokenExceptionRegex);
}
private PatternTokenBuilder pos(String posTag, boolean regexp) {
this.posTag = Objects.requireNonNull(posTag);
this.regexp = regexp;
return this;
}
private PatternTokenBuilder pos(String posTag, boolean regexp, String tokenExceptionRegex) {
this.posTag = Objects.requireNonNull(posTag);
this.regexp = regexp;
this.tokenException = tokenExceptionRegex;
return this;
}
/** @since 3.3 */
public PatternTokenBuilder negate() {
this.negation = true;
return this;
}
/** @since 4.0 */
public PatternTokenBuilder setSkip(int skip) {
this.skip = skip;
return this;
}
/** @since 4.4 */
public PatternTokenBuilder setIsWhiteSpaceBefore(boolean whiteSpaceBefore) {
this.isWhiteSpaceBefore = whiteSpaceBefore;
this.isWhiteSpaceSet = true;
return this;
}
/**
* Also match inflected forms of the given word - note this will only work when the
* given token actually is a baseform.
* @since 3.8
*/
public PatternTokenBuilder matchInflectedForms() {
matchInflectedForms = true;
return this;
}
public PatternToken build() {<FILL_FUNCTION_BODY>}
}
|
PatternToken patternToken;
patternToken = new PatternToken(token, caseSensitive, regexp, matchInflectedForms);
if (posTag != null) {
patternToken.setPosToken(new PatternToken.PosToken(posTag, regexp, false));
}
if (isWhiteSpaceSet) {
patternToken.setWhitespaceBefore(isWhiteSpaceBefore);
}
if (maxOccurrence < minOccurrence) {
throw new IllegalArgumentException("minOccurrence must <= maxOccurrence: minOccurrence " + minOccurrence + ", maxOccurrence " + maxOccurrence);
}
if (tokenException != null) {
patternToken.setStringPosException(tokenException, true, false, false, false, false, null, false, false, false);
}
patternToken.setMinOccurrence(minOccurrence);
patternToken.setMaxOccurrence(maxOccurrence);
patternToken.setNegation(negation);
patternToken.setSkipNext(skip);
patternToken.setInsideMarker(marker);
return patternToken;
| 1,037
| 282
| 1,319
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/PatternTokenMatcher.java
|
PatternTokenMatcher
|
addMemberAndGroup
|
class PatternTokenMatcher {
private final PatternToken basePatternToken;
private PatternToken patternToken;
private List<PatternTokenMatcher> andGroup;
private boolean[] andGroupCheck;
public PatternTokenMatcher(PatternToken patternToken) {
basePatternToken = patternToken;
this.patternToken = basePatternToken;
if (basePatternToken.hasAndGroup()) {
List<PatternToken> patternTokenAndGroup = basePatternToken.getAndGroup();
andGroup = new ArrayList<>(patternTokenAndGroup.size());
for (PatternToken el : patternTokenAndGroup) {
PatternTokenMatcher matcher = new PatternTokenMatcher(el);
andGroup.add(matcher);
}
}
}
public void resolveReference(int firstMatchToken,
AnalyzedTokenReadings[] tokens, Language language) throws IOException {
if (basePatternToken.isReferenceElement()) {
int refPos = firstMatchToken + basePatternToken.getMatch().getTokenRef();
if (refPos < tokens.length) {
patternToken = basePatternToken.compile(tokens[refPos], language.getSynthesizer());
}
}
}
public PatternToken getPatternToken() {
return basePatternToken;
}
/**
* Checks whether the rule element matches the token given as a parameter.
*
* @param token AnalyzedToken to check matching against
* @return True if token matches, false otherwise.
*/
public final boolean isMatched(AnalyzedToken token) {
boolean matched = patternToken.isMatched(token);
if (patternToken.hasAndGroup()) {
andGroupCheck[0] |= matched;
}
return matched;
}
void prepareAndGroup(int firstMatchToken, AnalyzedTokenReadings[] tokens, Language language) throws IOException {
if (basePatternToken.hasAndGroup()) {
for (PatternTokenMatcher andMatcher : andGroup) {
andMatcher.resolveReference(firstMatchToken, tokens, language);
}
andGroupCheck = new boolean[patternToken.getAndGroup().size() + 1];
Arrays.fill(andGroupCheck, false);
}
}
/**
* Enables testing multiple conditions specified by different elements.
* Doesn't test exceptions.
*
* Works as logical AND operator only if preceded with
* {@link #prepareAndGroup(int, AnalyzedTokenReadings[], Language)}, and followed by {@link #checkAndGroup(boolean)}
*
* @param token the token checked.
*/
public final void addMemberAndGroup(AnalyzedToken token) {<FILL_FUNCTION_BODY>}
public final boolean checkAndGroup(boolean previousValue) {
if (patternToken.hasAndGroup()) {
boolean allConditionsMatch = true;
for (boolean testValue : andGroupCheck) {
allConditionsMatch &= testValue;
}
return allConditionsMatch;
}
return previousValue;
}
public final boolean isMatchedByScopeNextException(AnalyzedToken token) {
return patternToken.isMatchedByScopeNextException(token);
}
public final boolean isExceptionMatchedCompletely(AnalyzedToken token) {
return patternToken.isExceptionMatchedCompletely(token);
}
public boolean hasPreviousException() {
return patternToken.hasPreviousException();
}
public boolean isMatchedByPreviousException(AnalyzedTokenReadings token) {
return patternToken.isMatchedByPreviousException(token);
}
@Override
public String toString() {
return "PatternTokenMatcher for " + basePatternToken;
}
}
|
if (patternToken.hasAndGroup()) {
List<PatternTokenMatcher> andGroupList = andGroup;
for (int i = 0; i < andGroupList.size(); i++) {
if (!andGroupCheck[i + 1]) {
PatternTokenMatcher testAndGroup = andGroupList.get(i);
if (testAndGroup.isMatched(token)) {
andGroupCheck[i + 1] = true;
}
}
}
}
| 928
| 124
| 1,052
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/RegexAntiPatternFilter.java
|
RegexAntiPatternFilter
|
acceptRuleMatch
|
class RegexAntiPatternFilter extends RegexRuleFilter {
@Nullable
@Override
public RuleMatch acceptRuleMatch(RuleMatch match, Map<String, String> arguments, AnalyzedSentence sentenceObj, Matcher patternMatcher) {<FILL_FUNCTION_BODY>}
}
|
String antiPatternStr = arguments.get("antipatterns");
if (antiPatternStr == null) {
throw new RuntimeException("Missing 'antiPatterns:' in 'args' in <filter> of rule " + match.getRule().getFullId());
}
String[] antiPatterns = antiPatternStr.split("\\|");
for (String antiPattern : antiPatterns) {
Pattern p = Pattern.compile(antiPattern);
Matcher matcher = p.matcher(sentenceObj.getText());
if (matcher.find()) {
// partial overlap is enough to filter out a match:
if (matcher.start() <= match.getToPos() && matcher.end() >= match.getToPos() ||
matcher.start() <= match.getFromPos() && matcher.end() >= match.getFromPos()) {
return null;
}
}
}
return match;
| 72
| 229
| 301
|
<methods>public non-sealed void <init>() ,public abstract org.languagetool.rules.RuleMatch acceptRuleMatch(org.languagetool.rules.RuleMatch, Map<java.lang.String,java.lang.String>, org.languagetool.AnalyzedSentence, java.util.regex.Matcher) <variables>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/RegexRuleFilterCreator.java
|
RegexRuleFilterCreator
|
getFilter
|
class RegexRuleFilterCreator {
/**
* @param className fully qualified class Name of a class implementing {@link RegexRuleFilter}
*/
public RegexRuleFilter getFilter(String className) {<FILL_FUNCTION_BODY>}
}
|
try {
Class<?> aClass = JLanguageTool.getClassBroker().forName(className);
Constructor<?>[] constructors = aClass.getConstructors();
if (constructors.length != 1) {
throw new RuntimeException("Constructor of filter class '"
+ className + "' must have exactly one constructor, but it has " + constructors.length);
}
Constructor<?> constructor = constructors[0];
try {
if (constructor.getParameterTypes().length != 0) {
throw new RuntimeException("Constructor of filter class '" + className + "' must not have arguments: " + constructor);
}
Object filter = constructor.newInstance();
if (filter instanceof RegexRuleFilter) {
return (RegexRuleFilter) filter;
} else {
throw new RuntimeException("Filter class '" + className + "' must implement interface " + RegexRuleFilter.class.getSimpleName());
}
} catch (Exception e) {
throw new RuntimeException("Could not create filter class using constructor " + constructor, e);
}
} catch (ClassNotFoundException e) {
throw new RuntimeException("Could not find filter class: '"
+ className + "' - make sure to use a fully qualified class name like 'org.languagetool.rules.MyFilter'");
}
| 67
| 323
| 390
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/RegexRuleFilterEvaluator.java
|
RegexRuleFilterEvaluator
|
getResolvedArguments
|
class RegexRuleFilterEvaluator {
private final RegexRuleFilter filter;
public RegexRuleFilterEvaluator(RegexRuleFilter filter) {
this.filter = filter;
}
@Nullable
public RuleMatch runFilter(String filterArgs, RuleMatch ruleMatch, AnalyzedSentence sentenceObj, Matcher patternMatcher) {
Map<String,String> args = getResolvedArguments(filterArgs);
return filter.acceptRuleMatch(ruleMatch, args, sentenceObj, patternMatcher);
}
private Map<String,String> getResolvedArguments(String filterArgs) {<FILL_FUNCTION_BODY>}
}
|
Map<String,String> result = new HashMap<>();
String[] arguments = filterArgs.split("\\s+");
for (String arg : arguments) {
int delimPos = arg.indexOf(':');
if (delimPos == -1) {
throw new RuntimeException("Invalid syntax for key/value, expected 'key:value', got: '" + arg + "'");
}
String key = arg.substring(0, delimPos);
String val = arg.substring(delimPos + 1);
result.put(key, val);
}
return result;
| 164
| 149
| 313
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/RepeatedPatternRuleTransformer.java
|
RepeatedPatternRule
|
match
|
class RepeatedPatternRule extends TextLevelRule {
protected final Language ruleLanguage;
RepeatedPatternRule(List<AbstractPatternRule> rules, Language lang) {
this.rules = Collections.unmodifiableList(rules);
this.ruleLanguage = lang;
setPremium(rules.stream().anyMatch(r -> r.isPremium()));
}
private final List<AbstractPatternRule> rules;
public List<AbstractPatternRule> getWrappedRules() {
return rules;
}
@Override
public String getId() {
return rules.get(0).getId();
}
@Override
public String getDescription() {
return rules.get(0).getDescription();
}
@Override
public RuleMatch[] match(List<AnalyzedSentence> sentences) throws IOException {<FILL_FUNCTION_BODY>}
@Override
public int minToCheckParagraph() {
// TODO: what should we use here? calculate based on min_prev_matches?
return 0;
}
@Override
public boolean supportsLanguage(Language language) {
return language.equalsConsiderVariantsIfSpecified(this.ruleLanguage);
}
private boolean isDistanceValid(List<Integer> distancesBetweenMatches, int maxDistanceTokens, int minPrevMatches) {
int i = 0;
int size = distancesBetweenMatches.size();
int distance = 0;
while (i < minPrevMatches && i < size) {
distance += distancesBetweenMatches.get(size - 1 - i);
i++;
}
return distance < maxDistanceTokens;
}
}
|
List<RuleMatch> matches = new ArrayList<>();
int offsetChars = 0;
int offsetTokens = 0;
int prevFromToken = 0;
int prevMatches = 0;
List<Integer> distancesBetweenMatches = new ArrayList<>();
// we need to adjust offsets since each pattern rule returns offsets relative to the sentence, not text
for (AnalyzedSentence s : sentences) {
List<RuleMatch> sentenceMatches = new ArrayList<>();
for (AbstractPatternRule rule : rules) {
RuleMatch[] ruleMatches = rule.match(s);
sentenceMatches.addAll(Arrays.asList(ruleMatches));
}
sentenceMatches = new SameRuleGroupFilter().filter(sentenceMatches);
// no sorting: SameRuleGroupFilter sorts rule matches already
int sentenceLenghtTokens = s.getTokensWithoutWhitespace().length;
for (RuleMatch m : sentenceMatches) {
int fromToken = 0;
while (fromToken < sentenceLenghtTokens
&& s.getTokensWithoutWhitespace()[fromToken].getStartPos() < m.getFromPos()) {
fromToken++;
}
fromToken += offsetTokens;
int fromPos = m.getFromPos() + offsetChars;
int toPos = m.getToPos() + offsetChars;
m.setOffsetPosition(fromPos, toPos);
int maxDistanceTokens = m.getRule().getDistanceTokens();
if (maxDistanceTokens < 1) {
maxDistanceTokens = defaultMaxDistanceTokens * m.getRule().getMinPrevMatches();
}
distancesBetweenMatches.add(fromToken - prevFromToken);
if (prevMatches >= m.getRule().getMinPrevMatches()
&& isDistanceValid(distancesBetweenMatches, maxDistanceTokens, m.getRule().getMinPrevMatches())) {
matches.add(m);
}
prevFromToken = fromToken;
prevMatches++;
}
offsetChars += s.getText().length();
offsetTokens += sentenceLenghtTokens - 1; // -1 -> not counting SENT_START
}
return matches.toArray(new RuleMatch[0]);
| 426
| 564
| 990
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/RuleFilter.java
|
FakeRule
|
getRequired
|
class FakeRule extends Rule {
@Override public String getId() { return "FAKE-RULE-FOR-FILTER"; }
@Override public String getDescription() { return "<none>"; }
@Override public RuleMatch[] match(AnalyzedSentence sentence) throws IOException { return new RuleMatch[0]; }
}
protected String getRequired(String key, Map<String, String> map) {<FILL_FUNCTION_BODY>
|
String result = map.get(key);
if (result == null) {
throw new IllegalArgumentException("Missing key '" + key + "'");
}
return result;
| 111
| 46
| 157
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/RuleFilterCreator.java
|
RuleFilterCreator
|
getFilter
|
class RuleFilterCreator {
/**
* @param className fully qualified class Name of a class implementing {@link RuleFilter}
*/
public RuleFilter getFilter(String className) {<FILL_FUNCTION_BODY>}
}
|
try {
Class<?> aClass = JLanguageTool.getClassBroker().forName(className);
Constructor<?>[] constructors = aClass.getConstructors();
if (constructors.length != 1) {
throw new RuntimeException("Constructor of filter class '"
+ className + "' must have exactly one constructor, but it has " + constructors.length);
}
Constructor<?> constructor = constructors[0];
try {
if (constructor.getParameterTypes().length != 0) {
throw new RuntimeException("Constructor of filter class '" + className + "' must not have arguments: " + constructor);
}
Object filter = constructor.newInstance();
if (filter instanceof RuleFilter) {
return (RuleFilter) filter;
} else {
throw new RuntimeException("Filter class '" + className + "' must implement interface " + RuleFilter.class.getSimpleName());
}
} catch (Exception e) {
throw new RuntimeException("Could not create filter class using constructor " + constructor, e);
}
} catch (ClassNotFoundException e) {
throw new RuntimeException("Could not find filter class: '"
+ className + "' - make sure to use a fully qualified class name like 'org.languagetool.rules.MyFilter'");
}
| 61
| 317
| 378
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/RuleFilterEvaluator.java
|
RuleFilterEvaluator
|
getResolvedArguments
|
class RuleFilterEvaluator {
private static final Pattern WHITESPACE = Pattern.compile("\\s+");
private final RuleFilter filter;
public RuleFilterEvaluator(RuleFilter filter) {
this.filter = filter;
}
@Nullable
public RuleMatch runFilter(String filterArgs, RuleMatch ruleMatch, AnalyzedTokenReadings[] patternTokens, int patternTokenPos, List<Integer> tokenPositions) throws IOException {
Map<String,String> args = getResolvedArguments(filterArgs, patternTokens, patternTokenPos, tokenPositions);
return filter.acceptRuleMatch(ruleMatch, args, patternTokenPos, patternTokens, tokenPositions);
}
/**
* Resolves the backref arguments, e.g. replaces {@code \1} by the value of the first token in the pattern.
*/
public Map<String,String> getResolvedArguments(String filterArgs, AnalyzedTokenReadings[] patternTokens, int patternTokenPos, List<Integer> tokenPositions) {<FILL_FUNCTION_BODY>}
// when there's a 'skip', we need to adapt the reference number
private int getSkipCorrectedReference(List<Integer> tokenPositions, int refNumber) {
int correctedRef = 0;
int i = 0;
for (int tokenPosition : tokenPositions) {
if (i++ >= refNumber) {
break;
}
correctedRef += tokenPosition;
}
return correctedRef - 1;
}
}
|
Map<String,String> result = new HashMap<>();
String[] arguments = WHITESPACE.split(filterArgs);
for (String arg : arguments) {
int delimPos = arg.indexOf(':');
if (delimPos == -1) {
throw new RuntimeException("Invalid syntax for key/value, expected 'key:value', got: '" + arg + "'");
}
String key = arg.substring(0, delimPos);
String val = arg.substring(delimPos + 1);
if (val.startsWith("\\")) {
int refNumber = Integer.parseInt(val.replace("\\", ""));
if (refNumber > tokenPositions.size()) {
throw new RuntimeException("Your reference number " + refNumber + " is bigger than the number of tokens: " + tokenPositions.size());
}
int correctedRef = getSkipCorrectedReference(tokenPositions, refNumber);
if (correctedRef >= patternTokens.length) {
throw new RuntimeException("Your reference number " + refNumber +
" is bigger than number of matching tokens: " + patternTokens.length);
}
if (result.containsKey(key)) {
throw new RuntimeException("Duplicate key '" + key + "'");
}
result.put(key, patternTokens[correctedRef].getToken());
} else {
result.put(key, val);
}
}
return result;
| 382
| 362
| 744
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/RuleSet.java
|
RuleSet
|
rulesForSentence
|
class RuleSet {
private volatile Set<String> ruleIds;
/**
* @return all rules in this set, not filtered
*/
public abstract List<Rule> allRules();
/**
* @return all rules from {@link #allRules} that might be applicable to the given sentence.
*/
public abstract List<Rule> rulesForSentence(AnalyzedSentence sentence);
/**
* @return the ids of {@link #allRules()}
* @since 5.6
*/
public Set<String> allRuleIds() {
Set<String> result = ruleIds;
if (result == null) {
ruleIds = result = Collections.unmodifiableSet(allRules().stream().map(Rule::getId).collect(Collectors.toSet()));
}
return result;
}
/**
* @return a simple RuleSet that returns all the rules from {@link #rulesForSentence}
*/
public static RuleSet plain(List<Rule> rules) {
List<Rule> allRules = Collections.unmodifiableList(rules);
return new RuleSet() {
@Override
public List<Rule> allRules() {
return allRules;
}
@Override
public List<Rule> rulesForSentence(AnalyzedSentence sentence) {
return allRules;
}
};
}
/**
* @return a RuleSet whose {@link #rulesForSentence} excludes rules requiring token texts or lemmas
* that don't occur in the given sentence
*/
public static RuleSet textLemmaHinted(List<? extends Rule> rules) {
return hinted(rules, true);
}
/**
* @return a RuleSet whose {@link #rulesForSentence} excludes rules requiring token texts
* that don't occur in the given sentence.
*/
public static RuleSet textHinted(List<? extends Rule> rules) {
return hinted(rules, false);
}
private static RuleSet hinted(List<? extends Rule> rules, boolean withLemmaHints) {
List<Rule> allRules = Collections.unmodifiableList(rules);
Map<String, BitSet> byToken = new HashMap<>();
Map<String, BitSet> byLemma = new HashMap<>();
BitSet unclassified = new BitSet();
for (int i = 0; i < allRules.size(); i++) {
Rule rule = allRules.get(i);
boolean classified = false;
if (rule instanceof AbstractTokenBasedRule) {
AbstractTokenBasedRule.TokenHint[] tokenHints = ((AbstractTokenBasedRule) rule).tokenHints;
AbstractTokenBasedRule.TokenHint firstHint =
tokenHints == null ? null :
withLemmaHints ? tokenHints[0] :
Arrays.stream(tokenHints).filter(th -> !th.inflected).findFirst().orElse(null);
if (firstHint != null) {
classified = true;
Map<String, BitSet> map = firstHint.inflected ? byLemma : byToken;
for (String hint : firstHint.lowerCaseValues) {
map.computeIfAbsent(hint, __ -> new BitSet()).set(i);
}
}
}
if (!classified) {
unclassified.set(i);
}
}
return new RuleSet() {
@Override
public List<Rule> allRules() {
return allRules;
}
@Override
public List<Rule> rulesForSentence(AnalyzedSentence sentence) {<FILL_FUNCTION_BODY>}
};
}
@ApiStatus.Internal
public static <T> List<T> filterList(BitSet includedIndices, List<T> list) {
List<T> result = new ArrayList<>();
int i = -1;
while (true) {
i = includedIndices.nextSetBit(i + 1);
if (i < 0) break;
result.add(list.get(i));
}
return result;
}
}
|
BitSet included = new BitSet();
included.or(unclassified);
if (!byLemma.isEmpty()) {
for (String lemma : sentence.getLemmaSet()) {
BitSet set = byLemma.get(lemma);
if (set != null) {
included.or(set);
}
}
}
for (String token : sentence.getTokenSet()) {
BitSet set = byToken.get(token);
if (set != null) {
included.or(set);
}
}
return filterList(included, allRules);
| 1,050
| 151
| 1,201
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/Substrings.java
|
Substrings
|
checkCanReplaceRegex
|
class Substrings {
final String[] substrings;
private final int minLength;
/** Whether the first of {@link #substrings} must occur at the matched fragment start */
final boolean mustStart;
/** Whether the last of {@link #substrings} must occur at the matched fragment end */
final boolean mustEnd;
Substrings(boolean mustStart, boolean mustEnd, String[] substrings) {
this(mustStart, mustEnd, substrings, Arrays.stream(substrings).mapToInt(String::length).sum());
}
private Substrings(boolean mustStart, boolean mustEnd, String[] substrings, int minLength) {
this.substrings = substrings;
this.mustStart = mustStart;
this.mustEnd = mustEnd;
this.minLength = minLength;
}
/**
* @return a version of this {@code Substrings} object
* whose {@link #matches} could completely replace the given regexp,
* or {@code null} if that's not feasible.
*/
@Nullable
Substrings checkCanReplaceRegex(String regexp) {<FILL_FUNCTION_BODY>}
@Override
public String toString() {
return (mustStart ? "[" : "(") + String.join(", ", substrings) + (mustEnd ? "]" : ")");
}
Substrings concat(Substrings another) {
String[] substrings;
if (another.substrings.length == 0) {
substrings = this.substrings;
} else if (this.substrings.length == 0) {
substrings = another.substrings;
} else if (mustEnd && another.mustStart) {
substrings = new String[this.substrings.length + another.substrings.length - 1];
System.arraycopy(this.substrings, 0, substrings, 0, this.substrings.length - 1);
substrings[this.substrings.length - 1] = this.substrings[this.substrings.length - 1] + another.substrings[0];
System.arraycopy(another.substrings, 0, substrings, this.substrings.length, another.substrings.length - 1);
} else {
substrings = new String[this.substrings.length + another.substrings.length];
System.arraycopy(this.substrings, 0, substrings, 0, this.substrings.length);
System.arraycopy(another.substrings, 0, substrings, this.substrings.length, another.substrings.length);
}
return new Substrings(mustStart, another.mustEnd, substrings);
}
/**
* Check whether {@code text} contains all required substrings and returns the index of the first of them,
* or -1 if the text doesn't contain these substrings.
*/
int find(String text, boolean caseSensitive) {
if (text.length() < minLength) {
return -1;
}
int start = indexOf(text, substrings[0], caseSensitive, 0);
if (start < 0) {
return -1;
}
if (substrings.length > 1 && !containsSubstrings(text, caseSensitive, start + substrings[0].length(), 1)) {
return -1;
}
return start;
}
/**
* @return whether the given text contains all the required substrings
*/
boolean matches(String text, boolean caseSensitive) {
if (text.length() < minLength) {
return false;
}
if (mustStart && !text.regionMatches(!caseSensitive, 0, substrings[0], 0, substrings[0].length())) {
return false;
}
if (mustEnd) {
String last = substrings[substrings.length - 1];
if (!text.regionMatches(!caseSensitive, text.length() - last.length(), last, 0, last.length())) {
return false;
}
}
if (substrings.length == 1 && (mustStart || mustEnd)) {
return true;
}
return containsSubstrings(text, caseSensitive, mustStart ? substrings[0].length() : 0, mustStart ? 1 : 0);
}
private boolean containsSubstrings(String text, boolean caseSensitive, int textPos, int firstSubstringIndex) {
for (int i = firstSubstringIndex; i < substrings.length; i++) {
textPos = indexOf(text, substrings[i], caseSensitive, textPos);
if (textPos < 0) {
return false;
}
textPos += substrings[i].length();
}
return true;
}
private static int indexOf(String text, String substring, boolean caseSensitive, int from) {
return caseSensitive ? text.indexOf(substring, from) : indexOfIgnoreCase(text, substring, from);
}
// a bit more optimized than Apache StringUtil.indexOfIgnoreCase
private static int indexOfIgnoreCase(String text, String substring, int from) {
char first = substring.charAt(0);
char up = Character.toUpperCase(first);
char low = Character.toLowerCase(first);
boolean cased = up != first || low != first;
while (true) {
from = cased ? findNext(text, from, up, low) : text.indexOf(first, from);
if (from < 0) {
return -1;
}
if (text.regionMatches(true, from, substring, 0, substring.length())) {
return from;
}
from++;
}
}
private static int findNext(String text, int from, char up, char low) {
for (int i = from; i < text.length(); i++) {
char c = text.charAt(i);
if (c == up || c == low || Character.toUpperCase(c) == up || Character.toLowerCase(c) == low) {
return i;
}
}
return -1;
}
}
|
if (mustStart || mustEnd) {
String prefix = mustStart ? substrings[0] : "";
String suffix = mustEnd ? substrings[substrings.length - 1] : "";
if (regexp.startsWith(prefix) &&
regexp.endsWith(suffix) &&
regexp.length() == prefix.length() + suffix.length() + 2 &&
regexp.charAt(prefix.length()) == '.') {
switch (regexp.charAt(prefix.length() + 1)) {
case '*': return this;
case '+': return new Substrings(mustStart, mustEnd, substrings, minLength + 1);
}
}
}
if (regexp.equals((mustStart ? "" : ".*") + String.join(".*", substrings) + (mustEnd ? "" : ".*"))) {
return this;
}
return null;
| 1,550
| 225
| 1,775
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/UnifierConfiguration.java
|
UnifierConfiguration
|
setEquivalence
|
class UnifierConfiguration {
/**
* A Map for storing the equivalence types for features. Features are
* specified as Strings, and map into types defined as maps from Strings to
* Elements.
*/
private final Map<EquivalenceTypeLocator, PatternToken> equivalenceTypes;
/**
* A Map that stores all possible equivalence types listed for features.
*/
private final Map<String, List<String>> equivalenceFeatures;
public UnifierConfiguration() {
// workaround for issue #13
equivalenceTypes = new ConcurrentHashMap<>();
equivalenceFeatures = new ConcurrentHashMap<>();
}
/**
* Prepares equivalence types for features to be tested. All equivalence
* types are given as {@link PatternToken}s. They create an equivalence set (with
* abstraction).
*
* @param feature Feature to be tested, like gender, grammatical case or number.
* @param type Type of equivalence for the feature, for example plural, first person, genitive.
* @param elem Element specifying the equivalence.
*/
public final void setEquivalence(String feature, String type,
PatternToken elem) {<FILL_FUNCTION_BODY>}
public Map<EquivalenceTypeLocator, PatternToken> getEquivalenceTypes() {
return Collections.unmodifiableMap(equivalenceTypes);
}
public Map<String, List<String>> getEquivalenceFeatures() {
return Collections.unmodifiableMap(equivalenceFeatures);
}
public Unifier createUnifier() {
return new Unifier(getEquivalenceTypes(), getEquivalenceFeatures());
}
}
|
EquivalenceTypeLocator typeKey = new EquivalenceTypeLocator(feature, type);
if (equivalenceTypes.containsKey(typeKey)) {
return;
}
equivalenceTypes.put(typeKey, elem);
List<String> lTypes;
if (equivalenceFeatures.containsKey(feature)) {
lTypes = equivalenceFeatures.get(feature);
} else {
// workaround for issue #13
lTypes = new CopyOnWriteArrayList<>();
equivalenceFeatures.put(feature, lTypes);
}
lTypes.add(type);
| 413
| 150
| 563
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/bitext/BitextPatternRule.java
|
BitextPatternRule
|
match
|
class BitextPatternRule extends BitextRule {
private final AbstractPatternRule srcRule;
private final AbstractPatternRule trgRule;
BitextPatternRule(AbstractPatternRule src, AbstractPatternRule trg) {
srcRule = src;
trgRule = trg;
}
public AbstractPatternRule getSrcRule() {
return srcRule;
}
public AbstractPatternRule getTrgRule() {
return trgRule;
}
@Override
public String getDescription() {
return srcRule.getDescription();
}
@Override
public String getMessage() {
return trgRule.getMessage();
}
@Override
public String getId() {
return srcRule.getId();
}
/**
* This method always returns an empty array.
* Use {@link #match(org.languagetool.AnalyzedSentence, org.languagetool.AnalyzedSentence)} instead.
*/
@Override
public RuleMatch[] match(AnalyzedSentence sentence) throws IOException {
return new RuleMatch[0];
}
@Override
public RuleMatch[] match(AnalyzedSentence sourceSentence,
AnalyzedSentence targetSentence) throws IOException {<FILL_FUNCTION_BODY>}
}
|
if (srcRule.match(sourceSentence).length > 0) {
return trgRule.match(targetSentence);
}
return new RuleMatch[0];
| 338
| 47
| 385
|
<methods>public non-sealed void <init>() ,public final List<org.languagetool.bitext.StringPair> getCorrectBitextExamples() ,public final List<org.languagetool.rules.bitext.IncorrectBitextExample> getIncorrectBitextExamples() ,public abstract java.lang.String getMessage() ,public static List<Class<? extends org.languagetool.rules.bitext.BitextRule>> getRelevantRules() ,public final org.languagetool.Language getSourceLanguage() ,public abstract org.languagetool.rules.RuleMatch[] match(org.languagetool.AnalyzedSentence, org.languagetool.AnalyzedSentence) throws java.io.IOException,public org.languagetool.rules.RuleMatch[] match(org.languagetool.AnalyzedSentence) throws java.io.IOException,public final void setCorrectBitextExamples(List<org.languagetool.bitext.StringPair>) ,public final void setIncorrectBitextExamples(List<org.languagetool.rules.bitext.IncorrectBitextExample>) ,public final void setSourceLanguage(org.languagetool.Language) <variables>private List<org.languagetool.bitext.StringPair> correctExamples,private List<org.languagetool.rules.bitext.IncorrectBitextExample> incorrectExamples,private org.languagetool.Language sourceLanguage
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/bitext/BitextPatternRuleHandler.java
|
BitextPatternRuleHandler
|
setExample
|
class BitextPatternRuleHandler extends PatternRuleHandler {
private static final String SOURCE = "source";
private static final String TARGET = "target";
private static final String SRC_EXAMPLE = "srcExample";
private static final String TRG_EXAMPLE = "trgExample";
private final List<BitextPatternRule> rules = new ArrayList<>();
private PatternRule srcRule;
private PatternRule trgRule;
private IncorrectExample trgExample;
private IncorrectExample srcExample;
private Language srcLang;
private List<StringPair> correctExamples = new ArrayList<>();
private List<IncorrectBitextExample> incorrectExamples = new ArrayList<>();
List<BitextPatternRule> getBitextRules() {
return rules;
}
// ===========================================================
// SAX DocumentHandler methods
// ===========================================================
@Override
public void startElement(String namespaceURI, String lName,
String qName, Attributes attrs) throws SAXException {
switch (qName) {
case RULES:
String languageStr = attrs.getValue("targetLang");
language = Languages.getLanguageForShortCode(languageStr);
break;
case RULE:
super.startElement(namespaceURI, lName, qName, attrs);
correctExamples = new ArrayList<>();
incorrectExamples = new ArrayList<>();
break;
case TARGET:
startPattern(attrs);
break;
case SOURCE:
srcLang = Languages.getLanguageForShortCode(attrs.getValue("lang"));
break;
default:
super.startElement(namespaceURI, lName, qName, attrs);
break;
}
}
@Override
public void endElement(String namespaceURI, String sName,
String qName) throws SAXException {
switch (qName) {
case RULE:
trgRule.setMessage(message.toString());
for (Match m : suggestionMatches) {
trgRule.addSuggestionMatch(m);
}
if (phrasePatternTokens.size() <= 1) {
suggestionMatches.clear();
}
BitextPatternRule bRule = new BitextPatternRule(srcRule, trgRule);
bRule.setCorrectBitextExamples(correctExamples);
bRule.setIncorrectBitextExamples(incorrectExamples);
bRule.setSourceLanguage(srcLang);
rules.add(bRule);
break;
case SRC_EXAMPLE:
srcExample = setExample();
break;
case TRG_EXAMPLE:
trgExample = setExample();
break;
case SOURCE:
srcRule = finalizeRule();
break;
case TARGET:
trgRule = finalizeRule();
break;
case EXAMPLE:
if (inCorrectExample) {
correctExamples.add(new StringPair(srcExample.getExample(), trgExample.getExample()));
} else if (inIncorrectExample) {
StringPair examplePair = new StringPair(srcExample.getExample(), trgExample.getExample());
if (trgExample.getCorrections().isEmpty()) {
incorrectExamples.add(new IncorrectBitextExample(examplePair));
} else {
List<String> corrections = trgExample.getCorrections();
incorrectExamples.add(new IncorrectBitextExample(examplePair, corrections));
}
}
inCorrectExample = false;
inIncorrectExample = false;
inErrorTriggerExample = false;
break;
default:
super.endElement(namespaceURI, sName, qName);
break;
}
}
private IncorrectExample setExample() {<FILL_FUNCTION_BODY>}
private PatternRule finalizeRule() {
PatternRule rule = null;
if (phrasePatternTokens.isEmpty()) {
rule = new PatternRule(id, language, patternTokens,
name, "", shortMessage.toString());
prepareRule(rule);
} else {
if (!patternTokens.isEmpty()) {
for (List<PatternToken> ph : phrasePatternTokens) {
ph.addAll(new ArrayList<>(patternTokens));
}
}
for (List<PatternToken> phrasePatternToken : phrasePatternTokens) {
processElement(phrasePatternToken);
rule = new PatternRule(id, language, phrasePatternToken,
name, message.toString(), shortMessage.toString(), "",
phrasePatternTokens.size() > 1);
prepareRule(rule);
}
}
patternTokens.clear();
if (phrasePatternTokens != null) {
phrasePatternTokens.clear();
}
startPositionCorrection = 0;
endPositionCorrection = 0;
return rule;
}
}
|
IncorrectExample example = null;
if (inCorrectExample) {
example = new IncorrectExample(correctExample.toString());
} else if (inIncorrectExample) {
if (exampleCorrection != null) {
List<String> corrections = new ArrayList<>(Arrays.asList(exampleCorrection.toString().split("\\|")));
if (exampleCorrection.toString().endsWith("|")) { // suggestions plus an empty suggestion (split() will ignore trailing empty items)
corrections.add("");
}
example = new IncorrectExample(incorrectExample.toString(), corrections);
} else {
example = new IncorrectExample(incorrectExample.toString());
}
} else if (inErrorTriggerExample) {
throw new RuntimeException("'triggers_error' is not supported for bitext XML");
}
correctExample = new StringBuilder();
incorrectExample = new StringBuilder();
exampleCorrection = null;
return example;
| 1,255
| 244
| 1,499
|
<methods>public void <init>() ,public void <init>(java.lang.String) ,public void <init>(java.lang.String, org.languagetool.Language) ,public void characters(char[], int, int) ,public void endElement(java.lang.String, java.lang.String, java.lang.String) throws org.xml.sax.SAXException,public org.xml.sax.InputSource resolveEntity(java.lang.String, java.lang.String) throws java.io.IOException, org.xml.sax.SAXException,public void startElement(java.lang.String, java.lang.String, java.lang.String, org.xml.sax.Attributes) throws org.xml.sax.SAXException<variables>private static final java.lang.String EXTERNAL,static final java.lang.String MARKER_TAG,static final java.lang.String PLEASE_SPELL_ME,static final java.lang.String RAW_TAG,public static final java.lang.String TYPE,private int antiPatternCounter,protected org.languagetool.rules.Category category,protected java.lang.String categoryIssueType,private boolean defaultOff,private boolean defaultTempOff,private int distanceTokens,private int endPos,protected java.lang.String filterArgs,protected java.lang.String filterClassName,private java.lang.String idPrefix,private boolean inAntiPattern,private boolean inRule,private final Map<java.lang.String,java.net.URL> internedUrls,private boolean interpretPosTagsPreDisambiguation,private boolean isRuleSuppressMisspelled,private boolean isSuggestionSuppressMisspelled,private int minPrevMatches,protected java.lang.String name,private boolean relaxedMode,private final List<org.languagetool.tagging.disambiguation.rules.DisambiguationPatternRule> ruleAntiPatterns,private boolean ruleGroupDefaultOff,private boolean ruleGroupDefaultTempOff,private java.lang.String ruleGroupDescription,private int ruleGroupDistanceTokens,protected java.lang.String ruleGroupIssueType,private int ruleGroupMinPrevMatches,protected java.lang.String ruleIssueType,private final List<org.languagetool.tagging.disambiguation.rules.DisambiguationPatternRule> rulegroupAntiPatterns,protected final non-sealed 
java.lang.String sourceFile,private int startPos,private int subId,private int tokenCountForMarker
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/bitext/BitextPatternRuleLoader.java
|
BitextPatternRuleLoader
|
getRules
|
class BitextPatternRuleLoader extends DefaultHandler {
public final List<BitextPatternRule> getRules(InputStream is, String filename) throws IOException {<FILL_FUNCTION_BODY>}
}
|
List<BitextPatternRule> rules;
try {
BitextPatternRuleHandler handler = new BitextPatternRuleHandler();
SAXParserFactory factory = SAXParserFactory.newInstance();
SAXParser saxParser = factory.newSAXParser();
saxParser.getXMLReader().setFeature("http://apache.org/xml/features/nonvalidating/load-external-dtd", false);
saxParser.parse(is, handler);
rules = handler.getBitextRules();
return rules;
} catch (Exception e) {
throw new IOException("Cannot load or parse '" + filename + "'", e);
}
| 53
| 165
| 218
|
<methods>public void <init>() ,public void characters(char[], int, int) throws org.xml.sax.SAXException,public void endDocument() throws org.xml.sax.SAXException,public void endElement(java.lang.String, java.lang.String, java.lang.String) throws org.xml.sax.SAXException,public void endPrefixMapping(java.lang.String) throws org.xml.sax.SAXException,public void error(org.xml.sax.SAXParseException) throws org.xml.sax.SAXException,public void fatalError(org.xml.sax.SAXParseException) throws org.xml.sax.SAXException,public void ignorableWhitespace(char[], int, int) throws org.xml.sax.SAXException,public void notationDecl(java.lang.String, java.lang.String, java.lang.String) throws org.xml.sax.SAXException,public void processingInstruction(java.lang.String, java.lang.String) throws org.xml.sax.SAXException,public org.xml.sax.InputSource resolveEntity(java.lang.String, java.lang.String) throws java.io.IOException, org.xml.sax.SAXException,public void setDocumentLocator(org.xml.sax.Locator) ,public void skippedEntity(java.lang.String) throws org.xml.sax.SAXException,public void startDocument() throws org.xml.sax.SAXException,public void startElement(java.lang.String, java.lang.String, java.lang.String, org.xml.sax.Attributes) throws org.xml.sax.SAXException,public void startPrefixMapping(java.lang.String, java.lang.String) throws org.xml.sax.SAXException,public void unparsedEntityDecl(java.lang.String, java.lang.String, java.lang.String, java.lang.String) throws org.xml.sax.SAXException,public void warning(org.xml.sax.SAXParseException) throws org.xml.sax.SAXException<variables>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/patterns/bitext/FalseFriendsAsBitextLoader.java
|
FalseFriendsAsBitextLoader
|
getFalseFriendsAsBitext
|
class FalseFriendsAsBitextLoader {
public List<BitextPatternRule> getFalseFriendsAsBitext(
String filename, Language motherTongue,
Language language) throws ParserConfigurationException,
SAXException, IOException {<FILL_FUNCTION_BODY>}
}
|
FalseFriendRuleLoader ruleLoader = new FalseFriendRuleLoader(motherTongue);
List<BitextPatternRule> bRules = new ArrayList<>();
List<AbstractPatternRule> rules1 = ruleLoader.getRules(
JLanguageTool.getDataBroker().getFromRulesDirAsStream(filename),
motherTongue, language);
List<AbstractPatternRule> rules2 = ruleLoader.getRules(
JLanguageTool.getDataBroker().getFromRulesDirAsStream(filename),
language, motherTongue);
Map<String, AbstractPatternRule> srcRules = new HashMap<>();
for (AbstractPatternRule rule : rules1) {
srcRules.put(rule.getId(), rule);
}
for (AbstractPatternRule rule : rules2) {
if (srcRules.containsKey(rule.getId())) {
BitextPatternRule bRule = new BitextPatternRule(
srcRules.get(rule.getId()), rule);
bRule.setSourceLanguage(motherTongue);
bRule.setCategory(rule.getCategory());
bRules.add(bRule);
}
}
return bRules;
| 77
| 297
| 374
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/spelling/CachingWordListLoader.java
|
CachingWordListLoader
|
load
|
class CachingWordListLoader {
// Speed up the server use case, where rules get initialized for every call.
private static final LoadingCache<String, List<String>> cache = CacheBuilder.newBuilder()
.expireAfterWrite(10, TimeUnit.MINUTES)
.build(new CacheLoader<String, List<String>>() {
@Override
public List<String> load(@NotNull String fileInClassPath) throws IOException {<FILL_FUNCTION_BODY>}
});
public List<String> loadWords(String filePath) {
return cache.getUnchecked(filePath);
}
}
|
List<String> result = new ArrayList<>();
if (!JLanguageTool.getDataBroker().resourceExists(fileInClassPath)) {
return result;
}
List<String> lines = JLanguageTool.getDataBroker().getFromResourceDirAsLines(fileInClassPath);
for (String line : lines) {
if (line.isEmpty() || line.startsWith("#")) {
continue;
}
result.add(StringUtils.substringBefore(line.trim(), "#").trim());
}
return Collections.unmodifiableList(result);
| 158
| 146
| 304
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/spelling/ForeignLanguageChecker.java
|
ForeignLanguageChecker
|
check
|
class ForeignLanguageChecker {
private static final float ERROR_THRESHOLD = 0.45f;
private static final int MIN_SENTENCE_THRESHOLD = 3;
private static final int MAX_SCORING_LANGUAGES = 5;
public static final String NO_FOREIGN_LANG_DETECTED = "NO_FOREIGN_LANG_DETECTED";
private final String languageShortCode;
private final String sentence;
private final long sentenceLength;
private final List<String> preferredLanguages;
public ForeignLanguageChecker(String languageShortCode, String sentence, Long sentenceLength, List<String> preferredLanguages) {
this.languageShortCode = languageShortCode;
this.sentence = sentence;
this.sentenceLength = sentenceLength;
this.preferredLanguages = Collections.unmodifiableList(preferredLanguages);
}
@NotNull
public Map<String, Float> check(int matchesSoFar) throws IOException {<FILL_FUNCTION_BODY>}
}
|
float errorRatio = (float) matchesSoFar / sentenceLength;
if (sentenceLength >= MIN_SENTENCE_THRESHOLD && errorRatio >= ERROR_THRESHOLD) {
LanguageIdentifier langIdent = LanguageIdentifierService.INSTANCE.getInitialized();
if (langIdent != null) {
//for now, we just use the result if also in preferredLanguages to prevent false positive
List<DetectedLanguage> detectedLanguageScores = langIdent.getDetectedLanguageScores(sentence, Collections.emptyList(), preferredLanguages, true, MAX_SCORING_LANGUAGES);
Map<String, Float> results = new LinkedHashMap<>(MAX_SCORING_LANGUAGES);
if (!detectedLanguageScores.isEmpty()) {
for (int i = 0; i < detectedLanguageScores.size(); i++) {
DetectedLanguage detectedLanguage = detectedLanguageScores.get(i);
Language language = detectedLanguage.getDetectedLanguage();
//The text main language still has the highest threshold
if (i == 0 && language.getShortCode().equals(languageShortCode)) {
return Collections.singletonMap(NO_FOREIGN_LANG_DETECTED, 0.99f);
}
//DO NEVER enable traceLevel for this class in production @LanguageTool
log.trace("Found '{}' sentence in '{}' text: '{}' with confidence {} from source '{}'",
language.getShortCode(),
languageShortCode,
sentence,
detectedLanguage.getDetectionConfidence(),
detectedLanguage.getDetectionSource());
results.put(language.getShortCode(), detectedLanguage.getDetectionConfidence());
}
return results;
} else {
return Collections.singletonMap(NO_FOREIGN_LANG_DETECTED, 0.99f);
}
}
}
return Collections.emptyMap();
| 266
| 486
| 752
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/spelling/RuleWithLanguage.java
|
RuleWithLanguage
|
equals
|
class RuleWithLanguage {
private final Rule rule;
private final Language language;
public RuleWithLanguage(Rule rule, Language language) {
this.rule = Objects.requireNonNull(rule);
this.language = Objects.requireNonNull(language);
}
public Language getLanguage() {
return language;
}
public Rule getRule() {
return rule;
}
@Override
public boolean equals(Object o) {<FILL_FUNCTION_BODY>}
@Override
public int hashCode() {
return Objects.hash(rule, language);
}
}
|
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
RuleWithLanguage that = (RuleWithLanguage) o;
return Objects.equals(rule, that.rule) && Objects.equals(language, that.language);
| 161
| 75
| 236
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/spelling/VagueSpellChecker.java
|
VagueSpellChecker
|
getSpellingCheckRule
|
class VagueSpellChecker {
private final static Map<Language, Rule> langToRule = new HashMap<>();
private final static Map<Language, Dictionary> langToDict = new HashMap<>();
public boolean isValidWord(String word, Language lang) {
Rule rule = langToRule.get(lang);
if (rule == null) {
Rule tempRule = getSpellingCheckRule(lang);
if (tempRule instanceof HunspellRule) {
rule = tempRule;
} else if (tempRule instanceof MorfologikSpellerRule) {
rule = new NonThreadSafeSpellRule(JLanguageTool.getMessageBundle(), lang, null);
}
langToRule.put(lang, rule);
}
if (rule instanceof NonThreadSafeSpellRule) {
// indicates a Morfologik-based speller - it's not thread-safe, so re-create Speller in isMisspelled():
return ((NonThreadSafeSpellRule) rule).isMisspelled(word);
} else if (rule instanceof HunspellRule) {
// it's okay to use a cached rule, as hunspell-based "isMisspelled()" is thread-safe
return !((HunspellRule) rule).isMisspelled(word);
} else {
throw new RuntimeException("Unknown rule type for language " + lang.getShortCodeWithCountryAndVariant() + ": " + rule);
}
}
private SpellingCheckRule getSpellingCheckRule(Language lang) {<FILL_FUNCTION_BODY>}
private class NonThreadSafeSpellRule extends SpellingCheckRule {
private NonThreadSafeSpellRule(ResourceBundle messages, Language language, UserConfig userConfig) {
super(messages, language, userConfig);
}
@Override
public String getId() {
return "FAKE_FOR_VAGUE_SPELL_CHECKER";
}
@Override
public String getDescription() {
return "internal";
}
@Override
public RuleMatch[] match(AnalyzedSentence sentence) throws IOException {
throw new RuntimeException("not implemented");
}
@Override
public boolean isMisspelled(String word) {
try {
Dictionary dict = langToDict.get(language); // Dictionary itself is thread-safe, so it can be cached and re-used
if (dict == null) {
SpellingCheckRule spellingRule = getSpellingCheckRule(language);
dict = Dictionary.read(JLanguageTool.getDataBroker().getFromResourceDirAsUrl(((MorfologikSpellerRule)spellingRule).getFileName()));
langToDict.put(language, dict);
}
Speller speller = new Speller(dict, 1);
return !speller.isMisspelled(word);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
}
|
JLanguageTool lt = new JLanguageTool(lang);
SpellingCheckRule spellRule = null;
for (Rule r : lt.getAllActiveRules()) {
if (r instanceof HunspellRule || r instanceof MorfologikSpellerRule) {
spellRule = (SpellingCheckRule) r;
// TODO: what if there's more than one spell rule?
break;
}
}
if (spellRule == null) {
throw new RuntimeException("No spelling rule found for language " + lang.getShortCodeWithCountryAndVariant() +
" - make sure to set 'preferredVariants' so a variant with a spell checker can be selected");
}
return spellRule;
| 740
| 181
| 921
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/spelling/hunspell/DumontsHunspellDictionary.java
|
DumontsHunspellDictionary
|
add
|
class DumontsHunspellDictionary implements HunspellDictionary {
private final Hunspell hunspell;
private boolean closed = false;
public DumontsHunspellDictionary(Path dictionary, Path affix) {
try {
hunspell = new Hunspell(dictionary,affix);
} catch (UnsatisfiedLinkError e) {
throw new RuntimeException("Could not create hunspell instance. Please note that LanguageTool supports only 64-bit platforms " +
"(Linux, Windows, Mac) and that it requires a 64-bit JVM (Java).", e);
}
}
@Override
public boolean spell(String word) {
if (closed) {
throw new RuntimeException("Attempt to use hunspell instance after closing");
}
return hunspell.spell(word);
}
@Override
public void add(String word) {<FILL_FUNCTION_BODY>}
@Override
public List<String> suggest(String word) {
if (closed) {
throw new RuntimeException("Attempt to use hunspell instance after closing");
}
return Arrays.asList(hunspell.suggest(word));
}
@Override
public boolean isClosed() {
return closed;
}
@Override
public void close() throws IOException {
closed = true;
hunspell.close();
}
}
|
if (closed) {
throw new RuntimeException("Attempt to use hunspell instance after closing");
}
hunspell.add(word);
| 366
| 41
| 407
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/spelling/hunspell/Hunspell.java
|
LanguageAndPath
|
equals
|
class LanguageAndPath {
private final Path dictionary;
private final Path affix;
LanguageAndPath(Path dictionary, Path affix) {
this.dictionary = Objects.requireNonNull(dictionary);
this.affix = Objects.requireNonNull(affix);
}
@Override
public boolean equals(Object o) {<FILL_FUNCTION_BODY>}
@Override
public int hashCode() {
return Objects.hash(dictionary, affix);
}
}
|
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
LanguageAndPath that = (LanguageAndPath) o;
return Objects.equals(dictionary, that.dictionary) &&
Objects.equals(affix, that.affix);
| 130
| 81
| 211
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/spelling/morfologik/MorfologikSpeller.java
|
MorfologikSpeller
|
getFrequency
|
class MorfologikSpeller {
// Speed up the server use case, where rules get initialized for every call.
// See https://github.com/morfologik/morfologik-stemming/issues/69 for confirmation that
// Dictionary is thread-safe:
private static final LoadingCache<String, Dictionary> dictCache = CacheBuilder.newBuilder()
//.maximumSize(0)
.expireAfterWrite(10, TimeUnit.MINUTES)
.build(new CacheLoader<String, Dictionary>() {
@Override
public Dictionary load(@NotNull String fileInClassPath) throws IOException {
ResourceDataBroker dataBroker = JLanguageTool.getDataBroker();
if (dataBroker.resourceExists(fileInClassPath)) {
return Dictionary.read(dataBroker.getFromResourceDirAsUrl(fileInClassPath));
} else {
return Dictionary.read(Paths.get(fileInClassPath));
}
}
});
private final Dictionary dictionary;
private final Speller speller;
private final int maxEditDistance;
/**
* Creates a speller with the given maximum edit distance.
* @param fileInClassPath path in classpath to morfologik dictionary
*/
public MorfologikSpeller(String fileInClassPath, int maxEditDistance) {
this(getDictionaryWithCaching(fileInClassPath), maxEditDistance);
}
/**
* Creates a speller with a maximum edit distance of one.
* @param fileInClassPath path in classpath to morfologik dictionary
*/
public MorfologikSpeller(String fileInClassPath) throws IOException {
this(fileInClassPath, 1);
}
/** @since 2.9 */
MorfologikSpeller(Dictionary dictionary, int maxEditDistance) {
if (maxEditDistance <= 0) {
throw new RuntimeException("maxEditDistance must be > 0: " + maxEditDistance);
}
this.dictionary = dictionary;
this.maxEditDistance = maxEditDistance;
speller = new Speller(dictionary, maxEditDistance);
}
/**
* Load a dictionary from the given path or reuse a cached instance, if present.
* @param fileInClassPath path in classpath to morfologik dictionary
*/
public static Dictionary getDictionaryWithCaching(@NotNull String fileInClassPath) {
return dictCache.getUnchecked(fileInClassPath);
}
public boolean isMisspelled(String word) {
if (word.isEmpty() || SpellingCheckRule.LANGUAGETOOL.equals(word) || SpellingCheckRule.LANGUAGETOOLER.equals(word)) {
return false;
}
synchronized (this) {
return speller.isMisspelled(word);
}
}
public synchronized List<String> findReplacements(String word) {
return speller.findReplacements(word);
}
public synchronized List<String> findSimilarWords(String word) {
return speller.findSimilarWords(word);
}
/**
* @deprecated use (or introduce) other methods to this class which would take care of the necessary synchronization
*/
@Deprecated
public Speller getSpeller() {
return speller;
}
public List<WeightedSuggestion> getSuggestions(String word) {
List<WeightedSuggestion> suggestions = new ArrayList<>();
if (word.length() > StringMatcher.MAX_MATCH_LENGTH) {
return suggestions;
}
// needs to be reset every time, possible bug: HMatrix for distance computation is not reset;
// output changes when reused
Speller speller = new Speller(dictionary, maxEditDistance);
List<Speller.CandidateData> replacementCandidates;
if (word.length() < 50) { // slow for long words (the limit is arbitrary)
replacementCandidates = speller.findReplacementCandidates(word);
for (Speller.CandidateData candidate : replacementCandidates) {
suggestions.add(new WeightedSuggestion(candidate.getWord(), candidate.getDistance()));
}
}
List<Speller.CandidateData> runOnCandidates = speller.replaceRunOnWordCandidates(word);
for (Speller.CandidateData runOnCandidate : runOnCandidates) {
suggestions.add(new WeightedSuggestion(runOnCandidate.getWord(), runOnCandidate.getDistance()));
}
// all upper-case suggestions if necessary
if (dictionary.metadata.isConvertingCase() && StringTools.isAllUppercase(word)) {
for (int i = 0; i < suggestions.size(); i++) {
WeightedSuggestion sugg = suggestions.get(i);
String allUppercase = sugg.getWord().toUpperCase();
// do not use capitalized word if it matches the original word or it's mixed case
if (allUppercase.equals(word) || StringTools.isMixedCase(suggestions.get(i).getWord())) {
allUppercase = sugg.getWord();
}
// remove duplicates
int auxIndex = getSuggestionIndex(suggestions, allUppercase);
if (auxIndex > i) {
suggestions.remove(auxIndex);
}
if (auxIndex > -1 && auxIndex < i) {
suggestions.remove(i);
i--;
} else {
suggestions.set(i, new WeightedSuggestion(allUppercase, sugg.getWeight()));
}
}
}
// capitalize suggestions if necessary
else if (dictionary.metadata.isConvertingCase() && StringTools.startsWithUppercase(word)) {
for (int i = 0; i < suggestions.size(); i++) {
WeightedSuggestion sugg = suggestions.get(i);
String uppercaseFirst = StringTools.uppercaseFirstChar(sugg.getWord());
// do not use capitalized word if it matches the original word or it's mixed case
if (uppercaseFirst.equals(word) || StringTools.isMixedCase(suggestions.get(i).getWord())) {
uppercaseFirst = sugg.getWord();
}
// remove capitalized duplicates
int auxIndex = getSuggestionIndex(suggestions, uppercaseFirst);
if (auxIndex > i) {
suggestions.remove(auxIndex);
}
if (auxIndex > -1 && auxIndex < i) {
suggestions.remove(i);
i--;
} else {
suggestions.set(i, new WeightedSuggestion(uppercaseFirst, sugg.getWeight()));
}
}
}
return suggestions;
}
private int getSuggestionIndex(List<WeightedSuggestion> suggestions, String uppercaseFirst) {
int i = 0;
for (WeightedSuggestion suggestion : suggestions) {
if (suggestion.getWord().equals(uppercaseFirst)) {
return i;
}
i++;
}
return -1;
}
/**
* Determines whether the dictionary uses case conversions.
* @return True when the speller uses spell conversions.
* @since 2.5
*/
public boolean convertsCase() {
return speller.convertsCase();
}
@Override
public String toString() {
return "dist=" + maxEditDistance;
}
public synchronized int getFrequency(String word) {<FILL_FUNCTION_BODY>}
}
|
int freq = speller.getFrequency(word);
if (freq == 0 && !word.equals(word.toLowerCase())) {
freq = speller.getFrequency(word.toLowerCase());
}
return freq;
| 1,924
| 66
| 1,990
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/spelling/morfologik/suggestions_ordering/DetailedDamerauLevenstheinDistance.java
|
Replace
|
apply
|
class Replace extends EditOperation {
@Override
public String apply(String s) {<FILL_FUNCTION_BODY>}
}
|
if (s.length() == 0) {
return null;
}
int i = random.nextInt(s.length());
char c = randomChar(random);
if (i == 0) {
return "" + c + s.substring(1);
} else if (i == s.length() - 1) {
return s.substring(0, i) + c;
} else {
return s.substring(0, i) + c + s.substring(i + 1);
}
| 38
| 135
| 173
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/spelling/multitoken/MultitokenSpellerFilter.java
|
MultitokenSpellerFilter
|
acceptRuleMatch
|
class MultitokenSpellerFilter extends RuleFilter {
/* Provide suggestions for misspelled multitoken expressions, usually proper nouns*/
@Override
public RuleMatch acceptRuleMatch(RuleMatch match, Map<String, String> arguments, int patternTokenPos,
AnalyzedTokenReadings[] patternTokens, List<Integer> tokenPositions) throws IOException {<FILL_FUNCTION_BODY>}
public boolean isMisspelled(String s, Language language) throws IOException {
SpellingCheckRule spellerRule = language.getDefaultSpellingRule();
if (spellerRule == null) {
return false;
}
List<String> tokens = language.getWordTokenizer().tokenize(s);
for (String token : tokens) {
if (spellerRule.isMisspelled(token)) {
return true;
};
}
return false;
}
}
|
if (Arrays.stream(patternTokens).allMatch(x -> x.isIgnoredBySpeller())) {
return null;
}
String underlinedError = match.getOriginalErrorStr();
Language lang = ((PatternRule) match.getRule()).getLanguage();
// check the spelling for some languages in a different way
boolean areTokensAcceptedBySpeller = false;
if (lang.getShortCode().equals("en") || lang.getShortCode().equals("de") || lang.getShortCode().equals("pt")
|| lang.getShortCode().equals("nl")) {
if (lang.getShortCodeWithCountryAndVariant().length()==2) {
// needed in testing
lang = lang.getDefaultLanguageVariant();
}
areTokensAcceptedBySpeller = !isMisspelled(underlinedError, lang) ;
}
List<String> replacements = lang.getMultitokenSpeller().getSuggestions(underlinedError, areTokensAcceptedBySpeller);
if (replacements.isEmpty()) {
return null;
}
// all upper-case suggestions
if (underlinedError.length()>4 && StringTools.isAllUppercase(underlinedError)) {
List<String> allupercaseReplacements = new ArrayList<>();
for (String replacement : replacements) {
String newReplacement = replacement.toUpperCase();
if (!allupercaseReplacements.contains(newReplacement) && !underlinedError.equals(newReplacement)) {
allupercaseReplacements.add(newReplacement);
}
}
replacements = allupercaseReplacements;
} else {
// capitalize suggestion at sentence start
int wordsStartPos = 1;
// ignore punctuation marks at the sentence start to do the capitalization
AnalyzedTokenReadings[] tokens = match.getSentence().getTokensWithoutWhitespace();
while (wordsStartPos<tokens.length && (StringTools.isPunctuationMark(tokens[wordsStartPos].getToken())
|| StringTools.isNotWordString((tokens[wordsStartPos].getToken())))) {
wordsStartPos++;
}
if (patternTokenPos==wordsStartPos) {
List<String> capitalizedReplacements = new ArrayList<>();
for (String replacement : replacements) {
String newReplacement = replacement;
if (replacement.equals(replacement.toLowerCase())) {
//do not capitalize iPad
newReplacement = StringTools.uppercaseFirstChar(replacement);
}
if (!capitalizedReplacements.contains(newReplacement) && !underlinedError.equals(newReplacement)) {
capitalizedReplacements.add(newReplacement);
}
}
replacements = capitalizedReplacements;
}
}
if (replacements.isEmpty()) {
return null;
}
match.setSuggestedReplacements(replacements);
return match;
| 222
| 764
| 986
|
<methods>public non-sealed void <init>() ,public abstract org.languagetool.rules.RuleMatch acceptRuleMatch(org.languagetool.rules.RuleMatch, Map<java.lang.String,java.lang.String>, int, org.languagetool.AnalyzedTokenReadings[], List<java.lang.Integer>) throws java.io.IOException,public boolean matches(Map<java.lang.String,java.lang.String>, org.languagetool.AnalyzedTokenReadings[], int, List<java.lang.Integer>) throws java.io.IOException<variables>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/spelling/suggestions/SuggestionsChanges.java
|
Report
|
run
|
class Report implements Runnable {
private final BufferedWriter reportWriter;
Report(BufferedWriter reportWriter) {
this.reportWriter = reportWriter;
}
@Override
public void run() {<FILL_FUNCTION_BODY>}
}
|
if (reportWriter == null) {
return;
}
try {
StringBuilder report = new StringBuilder();
report.append("Overall report:\n\n");
SuggestionChangesExperiment best = null;
int bestId = -1;
double bestAccuracy = 0.0;
int experimentId = 0;
for (SuggestionChangesExperiment experiment : experiments) {
experimentId++;
int correct = correctSuggestions.getOrDefault(experiment, 0);
int score = suggestionPosSum.getOrDefault(experiment, 0);
int notFound = notFoundSuggestions.getOrDefault(experiment, 0);
int total = numSamples.getOrDefault(experiment, 0);
double accuracy = (double) correct / total * 100.0;
double speed = (double) textSize.getOrDefault(experiment, 0) /
computationTime.getOrDefault(experiment, 0L) * 1000.0;
if (accuracy > bestAccuracy) {
best = experiment;
bestAccuracy = accuracy;
bestId = experimentId;
}
report.append(String.format("Experiment #%d (%s): %d / %d correct suggestions -> %f%% accuracy;" +
" score (less = better): %d; not found: %d; processed %f chars/second.%n",
experimentId, experiment, correct, total, accuracy, score, notFound, speed));
}
report.append(String.format("%nBest experiment: #%d (%s) @ %f%% accuracy%n", bestId, best, bestAccuracy));
for (SuggestionChangesDataset dataset : config.datasets) {
report.append(String.format("%nReport for dataset: %s%n", dataset.name));
best = null;
bestAccuracy = 0f;
bestId = -1;
experimentId = 0;
for (SuggestionChangesExperiment experiment : experiments) {
experimentId++;
Pair<SuggestionChangesExperiment, SuggestionChangesDataset> source = Pair.of(experiment, dataset);
int correct = datasetCorrectSuggestions.getOrDefault(source, 0);
int score = datasetSuggestionPosSum.getOrDefault(source, 0);
int notFound = datasetNotFoundSuggestions.getOrDefault(source, 0);
int total = datasetNumSamples.getOrDefault(source, 0);
double accuracy = (double) correct / total * 100.0;
double speed = (double) datasetTextSize.getOrDefault(source, 0) /
datasetComputationTime.getOrDefault(source, 0L) * 1000.0;
if (accuracy > bestAccuracy) {
best = experiment;
bestAccuracy = accuracy;
bestId = experimentId;
}
report.append(String.format("Experiment #%d (%s): %d / %d correct suggestions-> %f%% accuracy;" +
" score (less = better): %d; not found: %d; processed %f chars/second.%n",
experimentId, experiment, correct, total, accuracy, score, notFound, speed));
}
report.append(String.format("%nBest experiment: #%d (%s) @ %f%% accuracy%n", bestId, best, bestAccuracy));
}
System.out.println(report);
reportWriter.write(report.toString());
reportWriter.close();
} catch (IOException e) {
throw new RuntimeException(e);
}
| 71
| 922
| 993
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/spelling/symspell/implementation/ChunkArray.java
|
ChunkArray
|
add
|
class ChunkArray<T>
{
private static int chunkSize = 4096;//this must be a power of 2, otherwise can't optimize row and col functions
private static int divShift = 12; // number of bits to shift right to do division by chunkSize (the bit position of chunkSize)
public SuggestionStage.Node[][] values; // Note: Node (SymSpell.SuggestionStage.Node) is found in SymSpell.SymSpell.java.
public int count;
ChunkArray(int initialCapacity)
{
int chunks = (initialCapacity + chunkSize - 1) / chunkSize;
values = new SuggestionStage.Node[chunks][];
for (int i = 0; i < values.length; i++) values[i] = new Node[chunkSize];
}
public int add(Node value)
{<FILL_FUNCTION_BODY>}
public void clear()
{
count = 0;
}
public Node getValues(int index) {
return values[row(index)][col(index)];
}
public void setValues(int index, Node value){
values[row(index)][col(index)] = value;
}
public void setValues(int index, Node value, Node[][] list){
list[row(index)][col(index)] = value;
}
private int row(int index) { return index >> divShift; } // same as index / chunkSize
private int col(int index) { return index & (chunkSize - 1); } //same as index % chunkSize
private int capacity() { return values.length * chunkSize; }
}
|
if (count == capacity()) {
Node[][] newValues = Arrays.copyOf(values, values.length + 1);
newValues[values.length] = new Node[chunkSize];
values = newValues;
}
values[row(count)][col(count)] = value;
count++;
return count - 1;
| 427
| 91
| 518
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/spelling/symspell/implementation/EditDistance.java
|
EditDistance
|
DamerauLevenshteinDistance
|
class EditDistance {
public enum DistanceAlgorithm{
Damerau
}
private String baseString;
private DistanceAlgorithm algorithm;
private int[] v0;
private int[] v2;
/// <summary>Create a new EditDistance object.</summary>
/// <param name="baseString">The base string to which other strings will be compared.</param>
/// <param name="algorithm">The desired edit distance algorithm.</param>
public EditDistance(String baseString, DistanceAlgorithm algorithm)
{
this.baseString = baseString;
this.algorithm = algorithm;
if (this.baseString.isEmpty()) {
this.baseString = null;
return;
}
if (algorithm == DistanceAlgorithm.Damerau) {
v0 = new int[baseString.length()];
v2 = new int[baseString.length()]; // stores one level further back (offset by +1 position)
}
}
// <summary>compare a string to the base string to determine the edit distance,
/// using the previously selected algorithm.</summary>
/// <param name="string2">The string to compare.</param>
/// <param name="maxDistance">The maximum distance allowed.</param>
/// <returns>The edit distance (or -1 if maxDistance exceeded).</returns>
public int compare(String string2, int maxDistance) {
switch (algorithm) {
case Damerau: return DamerauLevenshteinDistance(string2, maxDistance);
}
throw new IllegalArgumentException("unknown DistanceAlgorithm");
}
// stores one level further back (offset by +1 position)
/// <param name="string1">String being compared for distance.</param>
/// <param name="string2">String being compared against other string.</param>
/// <param name="maxDistance">The maximum edit distance of interest.</param>
/// <returns>int edit distance, >= 0 representing the number of edits required
/// to transform one string to the other, or -1 if the distance is greater than the specified maxDistance.</returns>
public int DamerauLevenshteinDistance(String string2, int maxDistance) {<FILL_FUNCTION_BODY>}
}
|
if (baseString == null) return string2 == null ? 0 : string2.length(); //string2 ?? "").Length;
if (string2 == null || string2.isEmpty()) return baseString.length();
if(maxDistance == 0) return baseString.equals(string2) ? 0 : -1;
// if strings of different lengths, ensure shorter string is in string1. This can result in a little
// faster speed by spending more time spinning just the inner loop during the main processing.
String string1;
if (baseString.length() > string2.length()) {
string1 = string2;
string2 = baseString;
} else {
string1 = baseString;
}
int sLen = string1.length(); // this is also the minimum length of the two strings
int tLen = string2.length();
// suffix common to both strings can be ignored
while ((sLen > 0) && (string1.charAt(sLen - 1) == string2.charAt(tLen - 1))) { sLen--; tLen--; }
int start = 0;
if ((string1.charAt(0) == string2.charAt(0)) || (sLen == 0)) { // if there'string1 a shared prefix, or all string1 matches string2'string1 suffix
// prefix common to both strings can be ignored
while ((start < sLen) && (string1.charAt(start) == string2.charAt(start))) start++;
sLen -= start; // length of the part excluding common prefix and suffix
tLen -= start;
// if all of shorter string matches prefix and/or suffix of longer string, then
// edit distance is just the delete of additional characters present in longer string
if (sLen == 0) return tLen;
string2 = string2.substring(start, start + tLen); // faster than string2[start+j] in inner loop below
}
int lenDiff = tLen - sLen;
if ((maxDistance < 0) || (maxDistance > tLen)) {
maxDistance = tLen;
} else if (lenDiff > maxDistance) return -1;
if (tLen > v0.length)
{
v0 = new int[tLen];
v2 = new int[tLen];
} else {
for(int i = 0; i < tLen; i++) v2[i] = 0; // Substituting Array.clear(v2, 0, tLen)
}
int j;
for (j = 0; j < maxDistance; j++) v0[j] = j + 1;
for (; j < tLen; j++) v0[j] = maxDistance + 1;
int jStartOffset = maxDistance - (tLen - sLen);
boolean haveMax = maxDistance < tLen;
int jStart = 0;
int jEnd = maxDistance;
char sChar = string1.charAt(0);
int current = 0;
for (int i = 0; i < sLen; i++) {
char prevsChar = sChar;
sChar = string1.charAt(start+i);
char tChar = string2.charAt(0);
int left = i;
current = left + 1;
int nextTransCost = 0;
// no need to look beyond window of lower right diagonal - maxDistance cells (lower right diag is i - lenDiff)
// and the upper left diagonal + maxDistance cells (upper left is i)
jStart += (i > jStartOffset) ? 1 : 0;
jEnd += (jEnd < tLen) ? 1 : 0;
for (j = jStart; j < jEnd; j++) {
int above = current;
int thisTransCost = nextTransCost;
nextTransCost = v2[j];
v2[j] = current = left; // cost of diagonal (substitution)
left = v0[j]; // left now equals current cost (which will be diagonal at next iteration)
char prevtChar = tChar;
tChar = string2.charAt(j);
if (sChar != tChar) {
if (left < current) current = left; // insertion
if (above < current) current = above; // deletion
current++;
if ((i != 0) && (j != 0)
&& (sChar == prevtChar)
&& (prevsChar == tChar)) {
thisTransCost++;
if (thisTransCost < current) current = thisTransCost; // transposition
}
}
v0[j] = current;
}
if (haveMax && (v0[i + lenDiff] > maxDistance)) return -1;
}
return (current <= maxDistance) ? current : -1;
| 578
| 1,198
| 1,776
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/spelling/symspell/implementation/SuggestItem.java
|
SuggestItem
|
compareTo
|
class SuggestItem implements Comparator<SuggestItem>, Comparable<SuggestItem>
{
/// <summary>The suggested correctly spelled word.</summary>
public String term;
/// <summary>Edit distance between searched for word and suggestion.</summary>
public int distance;
/// <summary>Frequency of suggestion in the dictionary (a measure of how common the word is).</summary>
public long count;
/// <summary>Create a new instance of SymSpell.SuggestItem.</summary>
/// <param name="term">The suggested word.</param>
/// <param name="distance">Edit distance from search word.</param>
/// <param name="count">Frequency of suggestion in dictionary.</param>
public SuggestItem(String term, int distance, long count) {
this.term = term;
this.distance = distance;
this.count = count;
}
@Override
public int compare(SuggestItem suggestItem, SuggestItem t1) {
return suggestItem.compareTo(t1);
}
@Override
public boolean equals(Object obj) {
return obj instanceof SuggestItem && term.equals(((SuggestItem) obj).term);
}
@Override
public int hashCode()
{
return term.hashCode();
}
@Override
public String toString()
{
return "{" + term + ", " + distance + ", " + count + "}";
}
@Override
public int compareTo(SuggestItem other) {<FILL_FUNCTION_BODY>}
public SuggestItem clone(){
return new SuggestItem(this.term, this.distance, this.count);
}
}
|
// order by distance ascending, then by frequency count descending
if (this.distance == other.distance) return Long.compare(other.count, this.count);
return Integer.compare(this.distance, other.distance);
| 444
| 58
| 502
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/spelling/symspell/implementation/SuggestionStage.java
|
Entry
|
add
|
class Entry {
public int count;
public int first;
Entry(int count, int first) {
this.count = count;
this.first = first;
}
}
public Map<Integer, Entry> deletes; // {get; set; }
public ChunkArray<Node> nodes;
/// <summary>Create a new instance of SymSpell.SuggestionStage.</summary>
/// <remarks>Specifying ann accurate initialCapacity is not essential,
/// but it can help speed up processing by alleviating the need for
/// data restructuring as the size grows.</remarks>
/// <param name="initialCapacity">The expected number of words that will be added.</param>
/// <summary>Gets the count of unique delete words.</summary>
public int deleteCount() { return deletes.size(); }
/// <summary>Gets the total count of all suggestions for all deletes.</summary>
public int nodeCount() { return nodes.count; }
/// <summary>Clears all the data from the SuggestionStaging.</summary>
public void clear() {
deletes.clear();
nodes.clear();
}
void add(int deleteHash, String suggestion) {<FILL_FUNCTION_BODY>
|
Entry entry = deletes.getOrDefault(deleteHash, new Entry(0, -1));
int next = entry.first;
entry.count++;
entry.first = nodes.count;
deletes.put(deleteHash, entry);
nodes.add(new Node(suggestion, next));
| 324
| 78
| 402
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/rules/translation/TranslationEntry.java
|
TranslationEntry
|
toString
|
class TranslationEntry {
private final List<String> l1;
private final List<String> l2;
private final int itemCount;
public TranslationEntry(List<String> l1, List<String> l2, int itemCount) {
this.l1 = Objects.requireNonNull(l1);
this.l2 = Objects.requireNonNull(l2);
this.itemCount = itemCount;
}
public int getItemCount() {
return itemCount;
}
public List<String> getL1() {
return l1;
}
public List<String> getL2() {
return l2;
}
@Override
public String toString() {<FILL_FUNCTION_BODY>}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
TranslationEntry that = (TranslationEntry) o;
return l1.equals(that.l1) &&
l2.equals(that.l2);
}
@Override
public int hashCode() {
return Objects.hash(l1, l2);
}
}
|
//return l1 + " -> " + l2 + " (itemCount: "+ itemCount + ")";
return l1 + " -> " + l2;
| 321
| 44
| 365
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/synthesis/SynthesizerTools.java
|
SynthesizerTools
|
loadWords
|
class SynthesizerTools {
private SynthesizerTools() {
// static methods only, no public constructor
}
public static List<String> loadWords(InputStream stream) {<FILL_FUNCTION_BODY>}
}
|
List<String> result = new ArrayList<>();
try (Scanner scanner = new Scanner(stream, "UTF-8")) {
while (scanner.hasNextLine()) {
String line = scanner.nextLine().trim();
if (line.isEmpty() || line.charAt(0) == '#') { // ignore empty lines and comments
continue;
}
result.add(line);
}
}
return result;
| 65
| 114
| 179
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/tagging/CombiningTagger.java
|
CombiningTagger
|
tag
|
class CombiningTagger implements WordTagger {
private final WordTagger tagger1;
private final WordTagger tagger2;
private final WordTagger removalTagger;
private final boolean overwriteWithSecondTagger;
public CombiningTagger(WordTagger tagger1, WordTagger tagger2, boolean overwriteWithSecondTagger) {
this(tagger1, tagger2, null, overwriteWithSecondTagger);
}
/**
* @param tagger1 typically the tagger that takes its data from the binary file
* @param tagger2 typically the tagger that takes its data from the plain text file {@code added.txt}
* @param removalTagger the tagger that removes readings which takes its data from the plain text file {@code removed.txt}, or {@code null}
* @param overwriteWithSecondTagger if set to {@code true}, only the second tagger's result will be
* used if both first and second tagger can tag that word
* @since 3.2
*/
public CombiningTagger(WordTagger tagger1, WordTagger tagger2, WordTagger removalTagger, boolean overwriteWithSecondTagger) {
this.tagger1 = tagger1;
this.tagger2 = tagger2;
this.removalTagger = removalTagger;
this.overwriteWithSecondTagger = overwriteWithSecondTagger;
}
@Override
public List<TaggedWord> tag(String word) {<FILL_FUNCTION_BODY>}
/**
* @return removal tagger
* @since 5.0
*/
@Nullable
public WordTagger getRemovalTagger() {
return removalTagger;
}
}
|
List<TaggedWord> result = new ArrayList<>();
result.addAll(tagger2.tag(word));
if (!(overwriteWithSecondTagger && result.size() > 0)) {
result.addAll(tagger1.tag(word));
}
if (removalTagger != null) {
List<TaggedWord> removalTags = removalTagger.tag(word);
result.removeAll(removalTags);
}
return result;
| 438
| 121
| 559
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/tagging/ManualTagger.java
|
ManualTagger
|
tag
|
class ManualTagger implements WordTagger {
private final static String DEFAULT_SEPARATOR = "\t";
private static final int OFFSET_SHIFT = 8;
private static final int MAX_LENGTH = (1 << OFFSET_SHIFT) - 1;
private static final int MAX_OFFSET = (1 << 32 - OFFSET_SHIFT) - 1;
private static final int ENTRY_SIZE = 2;
private final String[] data;
/** A map from inflected forms to encoded lemma+POS pair offsets in {@link #data} */
private final Object2IntMap<String> map;
public ManualTagger(InputStream inputStream) throws IOException {
this(inputStream, false);
}
public ManualTagger(InputStream inputStream, boolean internTags) throws IOException {
Map<String, List<TaggedWord>> mapping = loadMapping(inputStream, internTags);
map = new Object2IntOpenHashMap<>(mapping.size());
int valueCount = mapping.values().stream().mapToInt(v -> v.size()).sum();
int firstIndex = ENTRY_SIZE; // skip an entry, as 0 means an absent value in TObjectIntHashMap
data = new String[valueCount * ENTRY_SIZE + firstIndex];
if (valueCount > MAX_OFFSET) {
throw new UnsupportedOperationException("Too many values (" + valueCount + "), the storage needs adjusting");
}
int index = firstIndex;
for (Map.Entry<String, List<TaggedWord>> entry : mapping.entrySet()) {
List<TaggedWord> value = entry.getValue();
if (value.size() > MAX_LENGTH) {
throw new UnsupportedOperationException(
"Too many lemmas (" + value.size() + " > " + MAX_LENGTH + " for " + entry.getKey() + "), the storage needs adjusting");
}
map.put(entry.getKey(), ((index / ENTRY_SIZE) << OFFSET_SHIFT) | value.size());
for (TaggedWord tw : value) {
data[index++] = tw.getLemma();
data[index++] = tw.getPosTag();
}
}
}
private static Map<String, List<TaggedWord>> loadMapping(InputStream inputStream, boolean internTags) throws IOException {
Map<String, List<TaggedWord>> map = new HashMap<>();
Map<String, String> interned = new HashMap<>();
try (
InputStreamReader reader = new InputStreamReader(inputStream, StandardCharsets.UTF_8);
BufferedReader br = new BufferedReader(reader)
) {
String line;
int lineCount = 0;
String separator = DEFAULT_SEPARATOR;
while ((line = br.readLine()) != null) {
line = line.trim();
lineCount++;
if (line.startsWith("#separatorRegExp=")) {
separator = line.replace("#separatorRegExp=", "");
}
if (StringTools.isEmpty(line) || line.charAt(0) == '#') {
continue;
}
if (line.contains("\u00A0")) {
throw new RuntimeException("Non-breaking space found in line #" + lineCount + ": '" + line + "', please remove it");
}
line = StringUtils.substringBefore(line, "#").trim();
String[] parts = line.split(separator);
if (parts.length != 3) {
throw new IOException("Unknown line format in line " + lineCount + " when loading manual tagger dictionary, " +
"expected three tab-separated fields: '" + line + "'");
}
String form = parts[0];
String lemma = parts[1];
if (lemma.equals(form)) lemma = form;
lemma = interned.computeIfAbsent(lemma, Function.identity());
String tag = parts[2].trim();
String internedTag = internTags ? tag.intern() : interned.computeIfAbsent(tag, Function.identity());
map.computeIfAbsent(form, __ -> new ArrayList<>()).add(new TaggedWord(lemma, internedTag));
}
}
return map;
}
/**
* Look up a word's baseform (lemma) and POS information.
*/
@Override
public List<TaggedWord> tag(String word) {<FILL_FUNCTION_BODY>}
}
|
int value = map.getInt(word);
if (value == 0) {
return Collections.emptyList();
}
int offset = (value >>> OFFSET_SHIFT) * ENTRY_SIZE;
int length = value & MAX_LENGTH;
List<TaggedWord> result = new ArrayList<>(length);
for (int i = 0; i < length; i++) {
result.add(new TaggedWord(data[offset + i * ENTRY_SIZE], data[offset + i * ENTRY_SIZE + 1]));
}
return result;
| 1,116
| 148
| 1,264
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/tagging/MorfologikTagger.java
|
MorfologikTagger
|
tag
|
class MorfologikTagger implements WordTagger {
private final URL dictUrl;
private Dictionary dictionary;
private boolean internTags;
public MorfologikTagger(String dictPath) {
this(JLanguageTool.getDataBroker().getFromResourceDirAsUrl(Objects.requireNonNull(dictPath)));
}
MorfologikTagger(URL dictUrl) {
this.dictUrl = Objects.requireNonNull(dictUrl);
}
/**
* Constructs a MorfologikTagger with the given morfologik dictionary.
* @since 3.4
*/
public MorfologikTagger(Dictionary dictionary) {
this(dictionary, false);
}
public MorfologikTagger(Dictionary dictionary, boolean internTags) {
this.dictUrl = null;
this.dictionary = dictionary;
this.internTags = internTags;
}
private synchronized Dictionary getDictionary() throws IOException {
if (dictionary == null) {
dictionary = Dictionary.read(dictUrl);
}
return dictionary;
}
public boolean getInternTags() {
return internTags;
}
public void setInternTags(boolean enabled) {
internTags = enabled;
}
@Override
public List<TaggedWord> tag(String word) {<FILL_FUNCTION_BODY>}
}
|
List<TaggedWord> result = new ArrayList<>();
try {
IStemmer dictLookup = new DictionaryLookup(getDictionary());
List<WordData> lookup = dictLookup.lookup(word);
for (WordData wordData : lookup) {
String tag = wordData.getTag() == null ? null : wordData.getTag().toString();
// Remove frequency data from tags (if exists)
// The frequency data is in the last byte (without a separator)
if (dictionary.metadata.isFrequencyIncluded() && tag != null && tag.length() > 1) {
tag = tag.substring(0, tag.length() - 1);
}
String stem = wordData.getStem() == null ? null : wordData.getStem().toString();
TaggedWord taggedWord = new TaggedWord(stem, (internTags && tag != null) ? tag.intern() : tag);
result.add(taggedWord);
}
} catch (IOException e) {
throw new RuntimeException("Could not tag word '" + word + "'", e);
}
return result;
| 356
| 281
| 637
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/tagging/TaggedWord.java
|
TaggedWord
|
equals
|
class TaggedWord {
private final String lemma;
private final String posTag;
public TaggedWord(String lemma, String posTag) {
this.lemma = lemma;
this.posTag = posTag;
}
public String getLemma() {
return lemma;
}
public String getPosTag() {
return posTag;
}
@Override
public String toString() {
return lemma + "/" + posTag;
}
@Override
public boolean equals(Object o) {<FILL_FUNCTION_BODY>}
@Override
public int hashCode() {
return Objects.hash(lemma, posTag);
}
}
|
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
TaggedWord other = (TaggedWord) o;
return Objects.equals(lemma, other.lemma) && Objects.equals(posTag, other.posTag);
| 180
| 77
| 257
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/tagging/disambiguation/rules/DisambiguatedExample.java
|
DisambiguatedExample
|
toString
|
class DisambiguatedExample {
private final String example;
private final String input;
private final String output;
public DisambiguatedExample(String example) {
this(example, null, null);
}
/**
* @param example Example sentence
* @param input Ambiguous forms of a token (specify in 'word[lemma/POS]' format)
* @param output Disambiguated forms of a token (specify in 'word[lemma/POS]' format)
*/
public DisambiguatedExample(String example, String input, String output) {
this.example = example;
this.input = input;
this.output = output;
}
/**
* Return the example that contains the error.
*/
public String getExample() {
return example;
}
public String getAmbiguous() {
return input;
}
/**
* Return the possible corrections. May be {@code null}.
*/
@Nullable
public String getDisambiguated() {
return output;
}
@Override
public String toString() {<FILL_FUNCTION_BODY>}
}
|
return example + ": " + input + " -> " + output;
| 300
| 21
| 321
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/tagging/disambiguation/rules/DisambiguationRuleLoader.java
|
DisambiguationRuleLoader
|
getRules
|
class DisambiguationRuleLoader extends DefaultHandler {
public final List<DisambiguationPatternRule> getRules(InputStream stream, Language language, String xmlPath)
throws ParserConfigurationException, SAXException, IOException {<FILL_FUNCTION_BODY>}
}
|
DisambiguationRuleHandler handler = new DisambiguationRuleHandler(language, xmlPath);
SAXParserFactory factory = SAXParserFactory.newInstance();
SAXParser saxParser = factory.newSAXParser();
if (JLanguageTool.isCustomPasswordAuthenticatorUsed()) {
Tools.setPasswordAuthenticator();
}
saxParser.parse(stream, handler);
return handler.getDisambRules();
| 69
| 113
| 182
|
<methods>public void <init>() ,public void characters(char[], int, int) throws org.xml.sax.SAXException,public void endDocument() throws org.xml.sax.SAXException,public void endElement(java.lang.String, java.lang.String, java.lang.String) throws org.xml.sax.SAXException,public void endPrefixMapping(java.lang.String) throws org.xml.sax.SAXException,public void error(org.xml.sax.SAXParseException) throws org.xml.sax.SAXException,public void fatalError(org.xml.sax.SAXParseException) throws org.xml.sax.SAXException,public void ignorableWhitespace(char[], int, int) throws org.xml.sax.SAXException,public void notationDecl(java.lang.String, java.lang.String, java.lang.String) throws org.xml.sax.SAXException,public void processingInstruction(java.lang.String, java.lang.String) throws org.xml.sax.SAXException,public org.xml.sax.InputSource resolveEntity(java.lang.String, java.lang.String) throws java.io.IOException, org.xml.sax.SAXException,public void setDocumentLocator(org.xml.sax.Locator) ,public void skippedEntity(java.lang.String) throws org.xml.sax.SAXException,public void startDocument() throws org.xml.sax.SAXException,public void startElement(java.lang.String, java.lang.String, java.lang.String, org.xml.sax.Attributes) throws org.xml.sax.SAXException,public void startPrefixMapping(java.lang.String, java.lang.String) throws org.xml.sax.SAXException,public void unparsedEntityDecl(java.lang.String, java.lang.String, java.lang.String, java.lang.String) throws org.xml.sax.SAXException,public void warning(org.xml.sax.SAXParseException) throws org.xml.sax.SAXException<variables>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/tagging/disambiguation/rules/XmlRuleDisambiguator.java
|
XmlRuleDisambiguator
|
disambiguate
|
class XmlRuleDisambiguator extends AbstractDisambiguator {
private static final String DISAMBIGUATION_FILE = "disambiguation.xml";
private static final String GLOBAL_DISAMBIGUATION_FILE = "disambiguation-global.xml";
private final RuleSet disambiguationRules;
public XmlRuleDisambiguator(Language language) {
// by default, don't use global disambiguation (for now)
this(language, false);
}
public XmlRuleDisambiguator(Language language, boolean useGlobalDisambiguation) {
Objects.requireNonNull(language);
String disambiguationFile = language.getShortCode() + "/" + DISAMBIGUATION_FILE;
List<DisambiguationPatternRule> disambiguationRulesList;
try {
disambiguationRulesList = loadPatternRules(disambiguationFile, language);
} catch (Exception e) {
throw new RuntimeException("Problems with loading disambiguation file: " + disambiguationFile, e);
}
if (useGlobalDisambiguation) {
// disambiguation-global.xml
try {
disambiguationRulesList.addAll(loadPatternRules(GLOBAL_DISAMBIGUATION_FILE, language));
} catch (Exception e) {
throw new RuntimeException("Problems with loading global disambiguation file: " + GLOBAL_DISAMBIGUATION_FILE, e);
}
}
disambiguationRules = RuleSet.textHinted(disambiguationRulesList);
}
@Override
public AnalyzedSentence disambiguate(AnalyzedSentence input) throws IOException {
return disambiguate(input, null);
}
@Override
public AnalyzedSentence disambiguate(AnalyzedSentence sentence,
@Nullable JLanguageTool.CheckCancelledCallback checkCanceled) throws IOException {<FILL_FUNCTION_BODY>}
/**
* Load disambiguation rules from an XML file. Use {@link JLanguageTool#addRule}
* to add these rules to the checking process.
*
* @return a List of {@link DisambiguationPatternRule} objects
*/
protected List<DisambiguationPatternRule> loadPatternRules(String filename, Language language)
throws ParserConfigurationException, SAXException, IOException {
DisambiguationRuleLoader ruleLoader = new DisambiguationRuleLoader();
return ruleLoader.getRules(JLanguageTool.getDataBroker().getFromResourceDirAsStream(filename), language, filename);
}
}
|
for (Rule rule : disambiguationRules.rulesForSentence(sentence)) {
if (checkCanceled != null && checkCanceled.checkCancelled()) {
break;
}
sentence = ((DisambiguationPatternRule) rule).replace(sentence);
}
return sentence;
| 671
| 81
| 752
|
<methods>public non-sealed void <init>() ,public org.languagetool.AnalyzedSentence preDisambiguate(org.languagetool.AnalyzedSentence) <variables>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/tagging/xx/DemoTagger.java
|
DemoTagger
|
tag
|
class DemoTagger implements Tagger {
@Override
public List<AnalyzedTokenReadings> tag(List<String> sentenceTokens) {<FILL_FUNCTION_BODY>}
@Override
public AnalyzedTokenReadings createNullToken(String token, int startPos) {
return new AnalyzedTokenReadings(new AnalyzedToken(token, null, null), startPos);
}
@Override
public AnalyzedToken createToken(String token, String posTag) {
return new AnalyzedToken(token, posTag, null);
}
}
|
List<AnalyzedTokenReadings> tokenReadings = new ArrayList<>();
for (String word : sentenceTokens) {
List<AnalyzedToken> l = new ArrayList<>();
// a real tagger would need to assign a POS tag
// in the next line instead of null:
l.add(new AnalyzedToken(word, null, null));
tokenReadings.add(new AnalyzedTokenReadings(l, 0));
}
return tokenReadings;
| 144
| 120
| 264
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/tokenizers/SRXSentenceTokenizer.java
|
SRXSentenceTokenizer
|
setSingleLineBreaksMarksParagraph
|
class SRXSentenceTokenizer implements SentenceTokenizer {
private final SrxDocument srxDocument;
private final Language language;
private String parCode;
/**
* Build a sentence tokenizer based on the rules in the {@code segment.srx} file
* that comes with LanguageTool.
*/
public SRXSentenceTokenizer(Language language) {
this(language, "/segment.srx");
}
/**
* @param srxInClassPath the path to an SRX file in the classpath
* @since 3.2
*/
public SRXSentenceTokenizer(Language language, String srxInClassPath) {
this.language = Objects.requireNonNull(language);
this.srxDocument = SrxTools.createSrxDocument(srxInClassPath);
setSingleLineBreaksMarksParagraph(false);
}
@Override
public final List<String> tokenize(String text) {
return SrxTools.tokenize(text, srxDocument, language.getShortCode() + parCode);
}
@Override
public final boolean singleLineBreaksMarksPara() {
return "_one".equals(parCode);
}
/**
* @param lineBreakParagraphs if <code>true</code>, single lines breaks are assumed to end a
* paragraph; if <code>false</code>, only two ore more consecutive line breaks end a paragraph
*/
@Override
public final void setSingleLineBreaksMarksParagraph(boolean lineBreakParagraphs) {<FILL_FUNCTION_BODY>}
}
|
if (lineBreakParagraphs) {
parCode = "_one";
} else {
parCode = "_two";
}
| 408
| 38
| 446
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/tokenizers/SrxTools.java
|
SrxTools
|
createSrxDocument
|
class SrxTools {
private SrxTools() {
}
static SrxDocument createSrxDocument(String path) {<FILL_FUNCTION_BODY>}
static List<String> tokenize(String text, SrxDocument srxDocument, String code) {
List<String> segments = new ArrayList<>();
TextIterator textIterator = new SrxTextIterator(srxDocument, code, text);
while (textIterator.hasNext()) {
segments.add(textIterator.next());
}
return segments;
}
}
|
try {
try (
InputStream inputStream = JLanguageTool.getDataBroker().getFromResourceDirAsStream(path);
BufferedReader srxReader = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8))
) {
Map<String, Object> parserParameters = new HashMap<>();
parserParameters.put(Srx2SaxParser.VALIDATE_PARAMETER, true);
SrxParser srxParser = new Srx2SaxParser(parserParameters);
return srxParser.parse(srxReader);
}
} catch (IOException e) {
throw new RuntimeException("Could not load SRX rules", e);
}
| 140
| 173
| 313
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/tools/ConfidenceKey.java
|
ConfidenceKey
|
equals
|
class ConfidenceKey {
private final Language lang;
private final String ruleId;
public ConfidenceKey(Language lang, String ruleId) {
this.lang = Objects.requireNonNull(lang);
this.ruleId = Objects.requireNonNull(ruleId);
}
@Override
public boolean equals(Object o) {<FILL_FUNCTION_BODY>}
@Override
public int hashCode() {
return Objects.hash(lang, ruleId);
}
@Override
public String toString() {
return lang.getShortCodeWithCountryAndVariant() + "/" + ruleId;
}
}
|
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ConfidenceKey that = (ConfidenceKey) o;
return Objects.equals(lang, that.lang) && Objects.equals(ruleId, that.ruleId);
| 168
| 77
| 245
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/tools/ContextTools.java
|
ContextTools
|
getContext
|
class ContextTools {
private int contextSize = 40;
private boolean escapeHtml = true;
private String errorMarkerStart = "<b><font bgcolor=\"#ff8b8b\">";
private String errorMarkerEnd = "</font></b>";
public ContextTools() {
}
public String getContext(int fromPos, int toPos, String contents) {<FILL_FUNCTION_BODY>}
/**
* Get a plain text context that uses {@code ^} characters in a new line as a marker of the
* given string region. Ignores {@link #setEscapeHtml(boolean)}.
* @since 2.3
*/
public String getPlainTextContext(int fromPos, int toPos, String contents) {
// calculate context region:
int startContent = fromPos - contextSize;
String prefix = "...";
String postfix = "...";
if (startContent < 0) {
prefix = "";
startContent = 0;
}
int endContent = toPos + contextSize;
if (endContent > contents.length()) {
postfix = "";
endContent = contents.length();
}
return prefix +
contents.substring(startContent, endContent).replace('\n', ' ').replace('\r', ' ').replace('\t', ' ') +
postfix + '\n' +
getMarker(fromPos, toPos, startContent, endContent, prefix);
}
/**
* Set the string used to mark the beginning and end of an error, e.g. {@code <span class="error">} and {@code </span>}
* @since 5.1
*/
public void setErrorMarker(String start, String end) {
errorMarkerStart = start;
errorMarkerEnd = end;
}
/**
* Set the string used to mark the beginning of an error, e.g. {@code <span class="error">}
*/
public void setErrorMarkerStart(String errorMarkerStart) {
this.errorMarkerStart = errorMarkerStart;
}
/**
* Set the string used to mark the end of an error, e.g. {@code </span>}
*/
public void setErrorMarkerEnd(String errorMarkerEnd) {
this.errorMarkerEnd = errorMarkerEnd;
}
/**
* The context size of the error. This many characters of the original text will be used
* from the left and from the right context of the error.
*/
public void setContextSize(int contextSize) {
this.contextSize = contextSize;
}
/**
* Whether HTML special characters should be escaped.
*/
public void setEscapeHtml(boolean escapeHtml) {
this.escapeHtml = escapeHtml;
}
private static String getMarker(int fromPos, int toPos, int startContent, int endContent, String prefix) {
return StringUtils.repeat(' ', prefix.length() + fromPos - startContent) +
StringUtils.repeat('^', toPos - fromPos) +
StringUtils.repeat(' ', endContent - toPos);
}
}
|
// calculate context region:
int startContent = fromPos - contextSize;
String prefix = "...";
String postfix = "...";
if (startContent < 0) {
prefix = "";
startContent = 0;
}
int endContent = toPos + contextSize;
int textLength = contents.length();
if (endContent > textLength) {
postfix = "";
endContent = textLength;
}
// now build context string plus marker:
StringBuilder sb = new StringBuilder();
sb.append(prefix);
sb.append(contents.substring(startContent, endContent).replace('\n', ' '));
String markerStr = getMarker(fromPos, toPos, startContent, endContent, prefix);
sb.append(postfix);
int startMark = markerStr.indexOf('^');
int endMark = markerStr.lastIndexOf('^');
String result = sb.toString();
if (escapeHtml) {
String escapedErrorPart = StringTools.escapeHTML(result.substring(startMark, endMark + 1))
.replace(" ", " "); // make sure whitespace errors are visible
result = StringTools.escapeHTML(result.substring(0, startMark))
+ errorMarkerStart
+ escapedErrorPart
+ errorMarkerEnd + StringTools.escapeHTML(result.substring(endMark + 1));
} else {
result = result.substring(0, startMark) + errorMarkerStart
+ result.substring(startMark, endMark + 1) + errorMarkerEnd
+ result.substring(endMark + 1);
}
return result;
| 787
| 409
| 1,196
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/tools/DiffsAsMatches.java
|
DiffsAsMatches
|
getPseudoMatches
|
class DiffsAsMatches {
public List<PseudoMatch> getPseudoMatches(String original, String revised) {<FILL_FUNCTION_BODY>}
}
|
List<PseudoMatch> matches = new ArrayList<>();
List<String> origList = DiffRowGenerator.SPLITTER_BY_WORD.apply(original);
List<String> revList = DiffRowGenerator.SPLITTER_BY_WORD.apply(revised);
List<AbstractDelta<String>> inlineDeltas = DiffUtils.diff(origList, revList, DiffRowGenerator.DEFAULT_EQUALIZER)
.getDeltas();
PseudoMatch lastMatch = null;
AbstractDelta<String> lastInlineDelta = null;
for (AbstractDelta<String> inlineDelta : inlineDeltas) {
String replacement = String.join("", inlineDelta.getTarget().getLines());
int fromPos = 0;
int errorIndex = inlineDelta.getSource().getPosition();
int indexCorrection = 0; // in case of INSERT, underline the 2 previous tokens (including a whitespace)
if (inlineDelta.getType() == DeltaType.INSERT) {
indexCorrection = 2;
if (errorIndex - indexCorrection < 0) {
indexCorrection = 0;
}
}
for (int i = 0; i < errorIndex - indexCorrection; i++) {
fromPos += origList.get(i).length();
}
boolean wasLastWhitespace = false;
String lastPunctuationStr = "";
if (errorIndex - 1 < origList.size() && errorIndex - 1 > -1) {
wasLastWhitespace = StringTools.isWhitespace(origList.get(errorIndex - 1));
if (StringTools.isPunctuationMark(origList.get(errorIndex - 1))) {
lastPunctuationStr = origList.get(errorIndex - 1);
};
}
String underlinedError = String.join("", inlineDelta.getSource().getLines());
int toPos = fromPos + underlinedError.length();
String prefixReplacement = "";
for (int i = errorIndex - indexCorrection; i < errorIndex; i++) {
toPos += origList.get(i).length();
prefixReplacement = prefixReplacement + origList.get(i);
}
replacement = prefixReplacement + replacement;
// INSERT at the sentence start
if (fromPos == 0 && toPos == 0) {
toPos = origList.get(0).length();
replacement = replacement + origList.get(0);
}
// remove unnecessary whitespace at the end in INSERT
if (inlineDelta.getType() == DeltaType.INSERT && replacement.endsWith(" ") && replacement.length() > 2
&& wasLastWhitespace) {
replacement = replacement.substring(0, replacement.length() - 1);
toPos--;
}
PseudoMatch match;
// serealiza -> se realiza CHANGE + INSERT -> 1 match
if (lastMatch != null && lastInlineDelta.getType() == DeltaType.CHANGE
&& inlineDelta.getType() == DeltaType.INSERT
//&& origList.get(inlineDelta.getSource().getPosition() - 1).equals(" ")
&& (wasLastWhitespace || !lastPunctuationStr.isEmpty())
&& inlineDelta.getSource().getPosition() - 1 == lastInlineDelta.getSource().getPosition()
+ lastInlineDelta.getSource().getLines().size()) {
String newReplacement = lastMatch.getReplacements().get(0) + lastPunctuationStr + replacement.substring(toPos - fromPos);
match = new PseudoMatch(newReplacement, lastMatch.getFromPos(), toPos);
matches.remove(matches.size() - 1);
// CHANGE + DELETE
} else if (lastMatch != null && inlineDelta.getType() == DeltaType.DELETE && wasLastWhitespace
&& lastMatch.getToPos() + 1 == fromPos) {
String newReplacement = lastMatch.getReplacements().get(0);
match = new PseudoMatch(newReplacement, lastMatch.getFromPos(), toPos - 1);
matches.remove(matches.size() - 1);
} else {
match = new PseudoMatch(replacement, fromPos, toPos);
}
matches.add(match);
lastMatch = match;
lastInlineDelta = inlineDelta;
}
return matches;
| 49
| 1,106
| 1,155
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/tools/InterruptibleCharSequence.java
|
InterruptibleCharSequence
|
charAt
|
class InterruptibleCharSequence implements CharSequence {
private final CharSequence inner;
public InterruptibleCharSequence(CharSequence inner) {
super();
this.inner = inner;
}
public char charAt(int index) {<FILL_FUNCTION_BODY>}
public int length() {
return inner.length();
}
public CharSequence subSequence(int start, int end) {
return new InterruptibleCharSequence(inner.subSequence(start, end));
}
@NotNull
@Override
public String toString() {
return inner.toString();
}
}
|
if (Thread.interrupted()) {
throw new RuntimeException(new InterruptedException());
}
return inner.charAt(index);
| 159
| 38
| 197
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/tools/LtThreadPoolExecutor.java
|
LtThreadPoolExecutor
|
afterExecute
|
class LtThreadPoolExecutor extends ThreadPoolExecutor {
private static final Gauge maxQueueSize = Gauge.build("languagetool_threadpool_max_queue_size", "Queue capacity by threadpool")
.labelNames("pool").register();
private static final Gauge queueSize = Gauge.build("languagetool_threadpool_queue_size", "Queue size by threadpool")
.labelNames("pool").register();
private static final Gauge largestPoolSize = Gauge.build("languagetool_threadpool_largest_queue_size", "The largest number of threads that have ever simultaneously been in the pool")
.labelNames("pool").register();
@Getter
private final String name;
LtThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, @NotNull TimeUnit unit, @NotNull BlockingQueue<Runnable> workQueue, @NotNull ThreadFactory threadFactory, @NotNull RejectedExecutionHandler handler) {
super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, handler);
this.name = name;
maxQueueSize.labels(name).set(workQueue.remainingCapacity());
}
{
Timer timer = new Timer("LtThreadPoolExecutorMonitor", true);
TimerTask timedAction = new TimerTask() {
@Override
public void run() {
queueSize.labels(name).set(getQueue().size());
largestPoolSize.labels(name).set(getLargestPoolSize());
log.trace("{} queueSize: {}", name, queueSize.labels(name).get());
log.trace("{} largestPoolSize: {}", name, largestPoolSize.labels(name).get());
}
};
timer.scheduleAtFixedRate(timedAction, 0, 1000);
}
@Override
public void execute(@NotNull Runnable command) {
super.execute(command);
}
@Override
public boolean remove(Runnable task) {
return super.remove(task);
}
@Override
protected void afterExecute(Runnable r, Throwable t) {<FILL_FUNCTION_BODY>}
@Override
protected void beforeExecute(Thread t, Runnable r) {
super.beforeExecute(t, r);
}
}
|
super.afterExecute(r, t);
// inherited from removed StoppingThreadPoolExecutor in org.languagetool.server.Server
if (t != null && t instanceof OutOfMemoryError) {
// we prefer to stop instead of being in an unstable state:
//noinspection CallToPrintStackTrace
t.printStackTrace();
System.exit(1);
}
| 591
| 100
| 691
|
<methods>public void <init>(int, int, long, java.util.concurrent.TimeUnit, BlockingQueue<java.lang.Runnable>) ,public void <init>(int, int, long, java.util.concurrent.TimeUnit, BlockingQueue<java.lang.Runnable>, java.util.concurrent.ThreadFactory) ,public void <init>(int, int, long, java.util.concurrent.TimeUnit, BlockingQueue<java.lang.Runnable>, java.util.concurrent.RejectedExecutionHandler) ,public void <init>(int, int, long, java.util.concurrent.TimeUnit, BlockingQueue<java.lang.Runnable>, java.util.concurrent.ThreadFactory, java.util.concurrent.RejectedExecutionHandler) ,public void allowCoreThreadTimeOut(boolean) ,public boolean allowsCoreThreadTimeOut() ,public boolean awaitTermination(long, java.util.concurrent.TimeUnit) throws java.lang.InterruptedException,public void execute(java.lang.Runnable) ,public int getActiveCount() ,public long getCompletedTaskCount() ,public int getCorePoolSize() ,public long getKeepAliveTime(java.util.concurrent.TimeUnit) ,public int getLargestPoolSize() ,public int getMaximumPoolSize() ,public int getPoolSize() ,public BlockingQueue<java.lang.Runnable> getQueue() ,public java.util.concurrent.RejectedExecutionHandler getRejectedExecutionHandler() ,public long getTaskCount() ,public java.util.concurrent.ThreadFactory getThreadFactory() ,public boolean isShutdown() ,public boolean isTerminated() ,public boolean isTerminating() ,public int prestartAllCoreThreads() ,public boolean prestartCoreThread() ,public void purge() ,public boolean remove(java.lang.Runnable) ,public void setCorePoolSize(int) ,public void setKeepAliveTime(long, java.util.concurrent.TimeUnit) ,public void setMaximumPoolSize(int) ,public void setRejectedExecutionHandler(java.util.concurrent.RejectedExecutionHandler) ,public void setThreadFactory(java.util.concurrent.ThreadFactory) ,public void shutdown() ,public List<java.lang.Runnable> shutdownNow() ,public java.lang.String toString() <variables>private static final int COUNT_BITS,private static final int 
COUNT_MASK,private static final boolean ONLY_ONE,private static final int RUNNING,private static final int SHUTDOWN,private static final int STOP,private static final int TERMINATED,private static final int TIDYING,private volatile boolean allowCoreThreadTimeOut,private long completedTaskCount,private volatile int corePoolSize,private final java.util.concurrent.atomic.AtomicInteger ctl,private static final java.util.concurrent.RejectedExecutionHandler defaultHandler,private volatile java.util.concurrent.RejectedExecutionHandler handler,private volatile long keepAliveTime,private int largestPoolSize,private final java.util.concurrent.locks.ReentrantLock mainLock,private volatile int maximumPoolSize,private static final java.lang.RuntimePermission shutdownPerm,private final java.util.concurrent.locks.Condition termination,private volatile java.util.concurrent.ThreadFactory threadFactory,private final BlockingQueue<java.lang.Runnable> workQueue,private final HashSet<java.util.concurrent.ThreadPoolExecutor.Worker> workers
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/tools/LtThreadPoolFactory.java
|
LtRejectedExecutionHandler
|
getFixedThreadPoolExecutor
|
class LtRejectedExecutionHandler extends ThreadPoolExecutor.AbortPolicy {
@Override
public void rejectedExecution(Runnable runnable, ThreadPoolExecutor threadPoolExecutor) {
String pool = ((LtThreadPoolExecutor) threadPoolExecutor).getName();
rejectedTasks.labels(pool).inc();
log.warn(LoggingTools.SYSTEM, "Task rejected from pool '{}' (queue full, all threads exhausted)", pool);
super.rejectedExecution(runnable, threadPoolExecutor);
}
}
private static final LtRejectedExecutionHandler handler = new LtRejectedExecutionHandler();
@NotNull
private static ThreadPoolExecutor getNewThreadPoolExecutor(@NotNull String identifier, int corePool, int maxThreads, int maxTaskInQueue, long keepAliveTimeSeconds, boolean isDaemon, @NotNull Thread.UncaughtExceptionHandler exceptionHandler) {
log.debug(LoggingTools.SYSTEM, String.format("Create new threadPool with corePool: %d maxThreads: %d maxTaskInQueue: %d identifier: %s daemon: %s exceptionHandler: %s", corePool, maxThreads, maxTaskInQueue, identifier, isDaemon, exceptionHandler));
BlockingQueue<Runnable> queue;
if (maxTaskInQueue == 0) {
queue = new LinkedBlockingQueue<>();
} else if (maxTaskInQueue < 0) {
queue = new SynchronousQueue<>();
} else {
// fair = true helps with respecting keep-alive time
queue = new ArrayBlockingQueue<>(maxTaskInQueue, true);
}
ThreadFactory threadFactory = new ThreadFactoryBuilder()
.setNameFormat(identifier + "-%d")
.setDaemon(isDaemon)
.setUncaughtExceptionHandler(exceptionHandler)
.build();
ThreadPoolExecutor newThreadPoolExecutor = new LtThreadPoolExecutor(identifier, corePool, maxThreads, keepAliveTimeSeconds, SECONDS, queue, threadFactory, handler);
return newThreadPoolExecutor;
}
/**
* @param identifier Name of an already created tread-pool
* @return An optional of ThreadPoolExecutor (Null or Object)
*/
public static Optional<ThreadPoolExecutor> getFixedThreadPoolExecutor(@NotNull String identifier) {<FILL_FUNCTION_BODY>
|
ThreadPoolExecutor value = executorServices.get(identifier);
if (value == null) {
log.debug(LoggingTools.SYSTEM, "Request: " + identifier + " not found, returning default pool");
return Optional.of(defaultPool);
} else {
return Optional.of(value);
}
| 566
| 82
| 648
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/tools/MostlySingularMultiMap.java
|
MostlySingularMultiMap
|
getList
|
class MostlySingularMultiMap<K, V> {
private final Object2ObjectOpenHashMap<K, Object> map;
public MostlySingularMultiMap(Map<K, List<V>> contents) {
map = new Object2ObjectOpenHashMap<>(contents.size());
for (Map.Entry<K, List<V>> entry : contents.entrySet()) {
List<V> value = entry.getValue();
map.put(entry.getKey(), value.size() == 1 ? value.get(0) : value.toArray());
}
map.trim();
}
@Nullable
public List<V> getList(K key) {<FILL_FUNCTION_BODY>}
}
|
Object o = map.get(key);
//noinspection unchecked
return o == null ? null :
o instanceof Object[] ? Arrays.asList((V[]) o) :
Collections.singletonList((V) o);
| 181
| 63
| 244
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-core/src/main/java/org/languagetool/tools/PseudoMatch.java
|
PseudoMatch
|
toString
|
class PseudoMatch {
private final List<String> replacements;
private final int fromPos;
private final int toPos;
PseudoMatch(String replacement, int fromPos, int toPos) {
this.replacements = new ArrayList<>();
this.replacements.add(replacement);
this.fromPos = fromPos;
this.toPos = toPos;
}
public List<String> getReplacements() {
return this.replacements;
}
public int getFromPos() {
return this.fromPos;
}
public int getToPos() {
return this.toPos;
}
public String toString() {<FILL_FUNCTION_BODY>}
}
|
return String.valueOf(this.fromPos) + "-" + String.valueOf(this.toPos) + "-" + this.replacements.toString();
| 198
| 44
| 242
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/CorrectLinePrinter.java
|
CorrectLinePrinter
|
main
|
class CorrectLinePrinter {
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
}
|
if (args.length != 2) {
System.out.println("Usage: " + CorrectLinePrinter.class.getSimpleName() + " <file> <langCode>");
System.exit(1);
}
List<String> lines = Files.readAllLines(Paths.get(args[0]));
JLanguageTool lt = new JLanguageTool(Languages.getLanguageForShortCode(args[1]));
lt.activateLanguageModelRules(new File("/home/languagetool/ngram-data/"));
for (String line : lines) {
List<RuleMatch> matches = lt.check(line);
if (matches.size() == 0) {
System.out.println(line);
}
}
| 37
| 195
| 232
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/DuplicateRemover.java
|
DuplicateRemover
|
main
|
class DuplicateRemover {
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
}
|
if (args.length != 1) {
System.out.println("Usage: " + DuplicateRemover.class.getSimpleName() + " <file>");
System.exit(1);
}
List<String> lines = Files.readAllLines(Paths.get(args[0]));
Set<String> seenBefore = new HashSet<>();
for (String line : lines) {
line = line.trim();
if (line.startsWith("#")) {
System.out.println(line);
} else if (seenBefore.contains(line)) {
//System.err.println("Ignoring " + line);
} else {
System.out.println(line);
}
seenBefore.add(line);
}
| 36
| 193
| 229
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/DutchWordSplitter.java
|
DutchWordSplitter
|
main
|
class DutchWordSplitter {
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
}
|
// exported as documented at https://dev.languagetool.org/developing-a-tagger-dictionary,
// then taking only the full form: awk '{print $1}' dictionary-nl.dump
String filename = "/home/dnaber/lt/dictionary-nl.dump";
MorfologikMultiSpeller speller = new MorfologikMultiSpeller("/nl/spelling/nl_NL.dict", Collections.singletonList("/nl/spelling/spelling.txt"), null, null, 1, null);
List<String> lines = Files.readAllLines(Paths.get(filename));
int lineCount = 0;
long lineTime = System.currentTimeMillis();
for (String line : lines) {
if (!speller.isMisspelled(line)) {
for (int i = 1; i < line.length(); i++) {
String part1 = line.substring(0, i);
String part2 = line.substring(i);
if (!speller.isMisspelled(part1) && !speller.isMisspelled(part2)) {
System.out.println(line + " => " + part1 + " " + part2);
}
}
}
lineCount++;
if (lineCount % 1000 == 0) {
long runTime = System.currentTimeMillis() - lineTime;
lineTime = System.currentTimeMillis();
System.out.printf("lineCount: " + lineCount + " (%.2fs)\n", runTime/1000.0f);
}
}
| 37
| 406
| 443
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/EmptyLuceneIndexCreator.java
|
EmptyLuceneIndexCreator
|
main
|
class EmptyLuceneIndexCreator {
private EmptyLuceneIndexCreator() {
}
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
}
|
if (args.length != 1) {
System.out.println("Usage: " + EmptyLuceneIndexCreator.class.getSimpleName() + " <indexPath>");
System.exit(1);
}
Analyzer analyzer = new StandardAnalyzer();
IndexWriterConfig config = new IndexWriterConfig(analyzer);
Directory directory = FSDirectory.open(new File(args[0]).toPath());
try (IndexWriter writer = new IndexWriter(directory, config)) {
FieldType fieldType = new FieldType();
fieldType.setIndexOptions(IndexOptions.DOCS);
fieldType.setStored(true);
Field countField = new Field("totalTokenCount", String.valueOf(0), fieldType);
Document doc = new Document();
doc.add(countField);
writer.addDocument(doc);
}
| 56
| 215
| 271
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/ExampleSentencePrinter.java
|
ExampleSentencePrinter
|
run
|
class ExampleSentencePrinter {
private static final int MAX_BLOCK_SIZE = 5000;
private void run(Language lang) {<FILL_FUNCTION_BODY>}
public static void main(String[] args) {
ExampleSentencePrinter prg = new ExampleSentencePrinter();
prg.run(Languages.getLanguageForShortCode("de"));
}
}
|
File basePath = new File("/home/dnaber/lt/git/languagetool/languagetool-language-modules");
if (!basePath.exists()) {
throw new RuntimeException("basePath does not exist: " + basePath);
}
JLanguageTool tool = new JLanguageTool(lang);
System.out.println("<html>");
System.out.println("<head>");
System.out.println(" <title>LanguageTool examples sentences</title>");
System.out.println(" <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" />");
System.out.println("</head>");
System.out.println("<body>");
int i = 1;
int blockSize = 0;
for (Rule rule : tool.getAllActiveRules()) {
List<IncorrectExample> incorrectExamples = rule.getIncorrectExamples();
if (incorrectExamples.size() > 0) {
String example = incorrectExamples.get(0).getExample()
.replace("<marker>", "<b>")
.replace("</marker>", "</b>");
int exampleLength = example.replace("<b>", "").replace("</b>", "").length();
if (blockSize + exampleLength > MAX_BLOCK_SIZE) {
System.out.println("<br><br>");
blockSize = 0;
}
//System.out.println(i + ". " + example + " [" + rule.getId() + "]<br>");
System.out.println(example + "<br>");
blockSize += exampleLength;
i++;
}
}
System.out.println("</body>");
System.out.println("</html>");
| 105
| 461
| 566
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/ExportGermanNouns.java
|
ExportGermanNouns
|
getSortedWords
|
class ExportGermanNouns {
private static final String DICT_FILENAME = "/de/german.dict";
private static final String ADDED_DICT_FILENAME = "languagetool-language-modules/de/src/main/resources/org/languagetool/resource/de/added.txt";
private ExportGermanNouns() {
}
private List<String> getSortedWords() throws IOException {<FILL_FUNCTION_BODY>}
private Set<String> getBinaryDictWords() throws IOException {
FSA fsa = FSA.read(JLanguageTool.getDataBroker().getFromResourceDirAsStream(DICT_FILENAME));
Set<String> set = new HashSet<>();
for (ByteBuffer buffer : fsa) {
byte[] sequence = new byte[buffer.remaining()];
buffer.get(sequence);
String output = new String(sequence, StandardCharsets.UTF_8);
if (isRelevantNoun(output)) {
String[] parts = output.split("_");
String term = parts[0].toLowerCase();
set.add(term);
}
}
return set;
}
private Set<String> getAddedDictWords() throws IOException {
Set<String> set = new HashSet<>();
List<String> lines = Files.readAllLines(FileSystems.getDefault().getPath(ADDED_DICT_FILENAME), Charsets.UTF_8);
for (String line : lines) {
if (isRelevantNoun(line)) {
final String[] parts = line.split("\t");
final String term = parts[0].toLowerCase();
set.add(term);
}
}
return set;
}
private boolean isRelevantNoun(String output) {
boolean isNoun = output.contains("SUB:") || (output.contains("EIG:") && output.contains("COU"));
return isNoun && !output.contains(":ADJ") && !StringTools.isAllUppercase(output);
}
public static void main(String[] args) throws IOException {
ExportGermanNouns prg = new ExportGermanNouns();
List<String> words = prg.getSortedWords();
System.out.println("# DO NOT MODIFY - automatically exported");
System.out.println("# Exporting class: " + ExportGermanNouns.class.getName());
System.out.println("# Export date: " + new Date());
System.out.println("# LanguageTool: " + JLanguageTool.VERSION + " (" + JLanguageTool.BUILD_DATE + ")");
System.out.println("# Potential German compound parts.");
System.out.println("# Data from Morphy (https://danielnaber.de/download/wklassen.pdf)");
System.out.println("# with extensions by LanguageTool (https://languagetool.org)");
System.out.println("# License: Creative Commons Attribution-Share Alike 4.0, http://creativecommons.org/licenses/by-sa/4.0/");
for (String word : words) {
System.out.println(word);
}
//System.err.println("Done. Printed " + words.size() + " words.");
}
}
|
Set<String> words1 = getBinaryDictWords();
Set<String> words2 = getAddedDictWords();
List<String> sortedWords = new ArrayList<>();
sortedWords.addAll(words1);
sortedWords.addAll(words2);
Collections.sort(sortedWords);
return sortedWords;
| 854
| 92
| 946
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/FalseFriendExporter.java
|
FalseFriendExporter
|
main
|
class FalseFriendExporter {
private final static String filename =
"/home/dnaber/lt/git/languagetool/languagetool-core/src/main/resources/org/languagetool/rules/false-friends.xml";
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
private static void listRuleMessages(Language l1, Language l2) throws IOException {
FalseFriendRuleLoader ruleLoader = new FalseFriendRuleLoader(l1);
List<AbstractPatternRule> rules = ruleLoader.getRules(new File(filename), l2, l1);
int i = 1;
for (AbstractPatternRule rule : rules) {
System.out.println(i + ". " + rule.getMessage().
replaceFirst("Hinweis: ", "").replaceAll("<suggestion>", "'").replaceAll("</suggestion>", "'"));
i++;
}
}
}
|
Language l1 = Languages.getLanguageForShortCode("nl");
Language l2 = Languages.getLanguageForShortCode("en");
listRuleMessages(l1, l2);
//listRuleMessages(l2, l1);
| 243
| 62
| 305
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/FilterFileByLanguage.java
|
FilterFileByLanguage
|
main
|
class FilterFileByLanguage {
private final static String fastTextBinary = "/home/languagetool/fasttext/fasttext";
private final static String fastTextModel = "/home/languagetool/fasttext/lid.176.bin";
private final static String nGramData = "/home/languagetool/model_ml50_new.zip";
private final static float skipThreshold = 0.95f; // only skip if confidence is higher than this
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
}
|
if (args.length != 2) {
System.out.println("Usage: " + FilterFileByLanguage.class.getSimpleName() + " <langCode> <file>");
System.exit(1);
}
String expectedLang = args[0];
File input = new File(args[1]);
File output = new File(input.getAbsoluteFile() + ".filtered");
LanguageIdentifier ident = LanguageIdentifierService.INSTANCE.getDefaultLanguageIdentifier(0,
new File(nGramData), new File(fastTextBinary), new File(fastTextModel));
Scanner sc = new Scanner(input);
int skipCount = 0;
try (FileWriter fw = new FileWriter(output)) {
while (sc.hasNextLine()) {
String line = sc.nextLine();
DetectedLanguage lang = ident.detectLanguage(line, Collections.emptyList(), Collections.emptyList());
if (lang != null && !lang.getDetectedLanguage().getShortCode().equals(expectedLang) && lang.getDetectionConfidence() > skipThreshold) {
System.out.printf("Skipping (%.2f, %s): %s\n", lang.getDetectionConfidence(), lang.getDetectedLanguage().getShortCode(), line);
skipCount++;
} else {
fw.write(line);
fw.write("\n");
}
}
}
System.out.println(skipCount + " lines skipped, confidence threshold was " + skipThreshold);
System.out.println("Filtered result written to " + output);
| 149
| 395
| 544
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/Fuzzer.java
|
Fuzzer
|
run
|
class Fuzzer {
private final static String[] charList = "0,.-".split("");
private void run() throws IOException {<FILL_FUNCTION_BODY>}
String fuzz(Random rnd, int length) {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < charList.length; i++) {
int randomPos = rnd.nextInt(charList.length);
int repeat = rnd.nextInt(length);
String s = StringUtils.repeat(charList[randomPos], repeat);
sb.append(s);
}
return sb.toString();
}
public static void main(String[] args) throws IOException {
new Fuzzer().run();
}
}
|
Random rnd = new Random(231);
for (Language language : Languages.get()) {
JLanguageTool lt = new JLanguageTool(language);
String text = fuzz(rnd, 1000);
long t1 = System.currentTimeMillis();
System.out.println(language.getShortCode() + " with text length of " + text.length() + "...");
System.out.println(">> " + text);
lt.check(text);
long t2 = System.currentTimeMillis();
System.out.println(language.getShortCode() + ": " + (t2-t1) + "ms");
}
| 191
| 172
| 363
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/FuzzerForAnalysis.java
|
FuzzerForAnalysis
|
run
|
class FuzzerForAnalysis extends Fuzzer {
private void run() throws IOException {<FILL_FUNCTION_BODY>}
public static void main(String[] args) throws IOException {
new FuzzerForAnalysis().run();
}
}
|
for (int i = 0; i < 10; i++) {
System.out.println("-----------------------");
Random rnd = new Random(i);
for (Language language : Languages.get()) {
JLanguageTool lt = new JLanguageTool(language);
Chunker chunker = language.getChunker();
if (chunker != null) {
String text = fuzz(rnd, 2500);
long t1 = System.currentTimeMillis();
System.out.println(language.getShortCode() + " with text length of " + text.length() + "...");
//System.out.println(">> " + text);
lt.getAnalyzedSentence(text);
long t2 = System.currentTimeMillis();
long runtime = t2 - t1;
float relRuntime = (float) runtime / text.length() * 1000;
System.out.printf(language.getShortCode() + ": " + runtime + "ms = %.2f ms/1K chars\n", relRuntime);
}
}
}
| 66
| 280
| 346
|
<methods>public non-sealed void <init>() ,public static void main(java.lang.String[]) throws java.io.IOException<variables>private static final java.lang.String[] charList
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/GenderWordFinder.java
|
GenderWordFinder
|
main
|
class GenderWordFinder {
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
}
|
if (args.length != 1) {
System.out.println("Usage: " + GenderWordFinder.class.getSimpleName() + " <file>");
System.exit(1);
}
List<String> lines = Files.readAllLines(Paths.get(args[0]));
Set<String> candidates = new HashSet<>();
for (String line : lines) {
if (line.endsWith("e")) { // this will probably filter too much...
candidates.add(line);
}
}
GermanyGerman de = new GermanyGerman();
JLanguageTool lt = new JLanguageTool(de);
for (Rule rule : lt.getAllActiveRules()) {
if (!rule.getId().equals("GERMAN_SPELLER_RULE")) {
lt.disableRule(rule.getId());
}
}
GermanSpellerRule speller = new GermanSpellerRule(JLanguageTool.getMessageBundle(), de);
for (String line : lines) {
if (line.endsWith("innen") && candidates.contains(line.replace("innen", "e")) &&
speller.isMisspelled(line.replace("innen", "")) &&
lt.check(line.replace("innen", "*innen")).size() > 0) {
System.out.println(line.replace("innen", "*innen"));
}
}
| 37
| 358
| 395
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/GermanCaseAmbiguityFinder.java
|
GermanCaseAmbiguityFinder
|
main
|
class GermanCaseAmbiguityFinder {
private static final String NGRAMS = "/home/dnaber/data/google-ngram-index/de";
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
}
|
LuceneLanguageModel lm = new LuceneLanguageModel(new File(NGRAMS));
Dictionary dictionary = Dictionary.read(JLanguageTool.getDataBroker().getFromResourceDirAsUrl("/de/german.dict"));
DictionaryLookup dl = new DictionaryLookup(dictionary);
Map<String,String> lc = new HashMap<>();
Map<String,String> uc = new HashMap<>();
System.out.println("Iterating...");
for (WordData wd : dl) {
String word = wd.getWord().toString();
String base = wd.getStem().toString();
if (startsWithLowercase(word) && startsWithUppercase(base) || startsWithUppercase(word) && startsWithLowercase(base)) {
// e.g. "Feilbieten"
continue;
}
String tag = wd.getTag().toString();
if (tag.endsWith(":INF") || tag.endsWith(":ADJ")) {
// "Das Laufen" etc.
continue;
}
if (!tag.startsWith("VER:") && !tag.startsWith("SUB:")) {
continue;
}
if (startsWithUppercase(word)) {
uc.put(word, tag);
} else if (startsWithLowercase(word)) {
lc.put(word, tag);
}
}
System.out.println("Done. lc=" + lc.size() + ", uc=" + uc.size());
for (Map.Entry<String, String> entry : uc.entrySet()) {
String key = lowercaseFirstChar(entry.getKey());
if (lc.containsKey(key)) {
//System.out.println(entry.getKey() + " " + entry.getValue() + " " + lc.get(key));
long lcCount = lm.getCount(lowercaseFirstChar(entry.getKey()));
long ucCount = lm.getCount(uppercaseFirstChar(entry.getKey()));
long sum = lcCount + ucCount;
System.out.println(sum + "\t" + lcCount + "\t" + ucCount + "\t" + entry.getKey());
}
}
| 70
| 577
| 647
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/GermanElLeAdjectives.java
|
GermanElLeAdjectives
|
main
|
class GermanElLeAdjectives {
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
}
|
List<String> lines = Files.readAllLines(Paths.get(args[0]));
Set<String> elWords = new HashSet<>();
for (String line : lines) {
line = line.trim();
if (line.endsWith("el")) {
elWords.add(line);
}
}
for (String line : lines) {
if (line.endsWith("le") && elWords.contains(line.replaceFirst("le$", "el"))) {
System.out.println(line);
}
}
| 39
| 146
| 185
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/GermanOldSpellingFinder.java
|
GermanOldSpellingFinder
|
main
|
class GermanOldSpellingFinder {
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
public static void main2(String[] args) throws IOException {
GermanyGerman lang = new GermanyGerman();
Synthesizer synth = lang.getSynthesizer();
String tmpWord = "hintergießen";
String[] formsAr = synth.synthesize(new AnalyzedToken(tmpWord, "FAKE", tmpWord), ".*", true);
System.out.println(Arrays.toString(formsAr));
System.exit(1);
}
}
|
GermanyGerman lang = new GermanyGerman();
Synthesizer synth = lang.getSynthesizer();
List<String> words = Files.readAllLines(Paths.get(args[0]));
int i = 0;
for (String word : words) {
if (i++ % 1000 == 0) {
System.out.println(i + "...");
}
if (!word.matches("^[a-zöäü].*")) {
continue;
}
String[] formsAr = synth.synthesize(new AnalyzedToken(word, "FAKE", word), ".*", true);
List<String> forms = Arrays.asList(formsAr);
for (String form : forms) {
if (form.matches(".*oß") && !forms.contains(form.replaceFirst("ß", "ss"))) {
System.out.println("No 'ss' form found: " + form);
}
}
}
| 158
| 253
| 411
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/InvalidGermanVerbFinder.java
|
InvalidGermanVerbFinder
|
main
|
class InvalidGermanVerbFinder {
private static final String SPELLING_FILE = "/home/dnaber/lt/git/languagetool/languagetool-language-modules/de/src/main/resources/org/languagetool/resource/de/hunspell/spelling.txt";
private static final String NGRAM_DIR = "/home/dnaber/data/google-ngram-index/de/";
private static final int THRESHOLD = 20;
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
}
|
LuceneLanguageModel lm = new LuceneLanguageModel(new File(NGRAM_DIR));
List<String> lines = Files.readAllLines(Paths.get(SPELLING_FILE));
for (String line : lines) {
if (line.startsWith("#")) {
continue;
}
if (line.contains("_")) {
line = line.replaceFirst("#.*", "");
String form = line.replace("_", "zu").trim();
long count = lm.getCount(form);
if (count < THRESHOLD) {
System.out.println(count + " " + form);
}
}
}
| 154
| 173
| 327
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/MissingEnglishPosFinder.java
|
MissingEnglishPosFinder
|
main
|
class MissingEnglishPosFinder {
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
private static boolean noTag(AnalyzedTokenReadings atr) {
return !atr.isTagged();
}
}
|
if (args.length != 2) {
System.out.println("Usage: " + MissingEnglishPosFinder.class.getSimpleName() + " <file> <ngram_dir>");
System.exit(1);
}
List<String> lines = Files.readAllLines(Paths.get(args[0]));
//List<String> lines = Arrays.asList("bike", "nicexxx");
LuceneLanguageModel lm = new LuceneLanguageModel(new File(args[1]));
EnglishTagger tagger = new EnglishTagger();
for (String word : lines) {
int origCount = -1;
if (word.matches("\\d+ .*")) {
String[] parts = word.split(" ");
origCount = Integer.parseInt(parts[0]);
word = parts[1];
}
word = word.trim();
if (word.endsWith(".")) {
word = word.substring(0, word.length()-1);
}
List<AnalyzedTokenReadings> matches = tagger.tag(Collections.singletonList(word));
List<AnalyzedTokenReadings> lcMatches = tagger.tag(Collections.singletonList(word.toLowerCase()));
if (matches.size() == 1 && noTag(matches.get(0)) && lcMatches.size() == 1 && noTag(lcMatches.get(0))) {
long count = origCount == -1 ? lm.getCount(word) : origCount;
System.out.println(count + "\t" + word);
}
}
| 69
| 404
| 473
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/MissingGermanCompoundAdjectiveFinder.java
|
MissingGermanCompoundAdjectiveFinder
|
main
|
class MissingGermanCompoundAdjectiveFinder {
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
/*public static void main2(String[] args) throws IOException {
List<String> lines = Files.readAllLines(Paths.get("/tmp/x"));
GermanSpellerRule speller = new GermanSpellerRule(JLanguageTool.getMessageBundle(), new GermanyGerman());
for (String line : lines) {
if (!speller.isMisspelled(line)) {
System.out.println(line);
}
}
}*/
}
|
List<String> lines = Files.readAllLines(Paths.get(args[0]));
Tagger tagger = new GermanyGerman().getTagger();
Set<String> printed = new HashSet<>();
int j = 0;
int splitCount = 0;
GermanWordSplitter splitter = new GermanWordSplitter(false);
for (String line : lines) {
if (!StringTools.startsWithLowercase(line)) {
continue;
}
String clean = line.replaceFirst("e[rmns]?$", "");
List<AnalyzedTokenReadings> tags = tagger.tag(Collections.singletonList(clean));
boolean isTagged = tags.stream().anyMatch(k -> k.isTagged());
if (isTagged) {
continue;
}
for (int i = clean.length()-2; i > 2; i--) {
String part1 = clean.substring(0, i);
String part2 = clean.substring(i);
List<AnalyzedTokenReadings> part1Tags = tagger.tag(Collections.singletonList(StringTools.uppercaseFirstChar(part1)));
List<AnalyzedTokenReadings> part2Tags = tagger.tag(Collections.singletonList(part2));
boolean part1isNoun = part1Tags.stream().anyMatch(k -> k.hasAnyPartialPosTag("SUB"));
boolean part2isAdj = part2Tags.stream().anyMatch(k -> k.hasAnyPartialPosTag("ADJ", "PA1", "PA2"));
if (part1isNoun && part2isAdj && !printed.contains(clean)) {
//System.out.println(part1 + " / " + part2 + " " + part2isAdj);
List<String> split = splitter.splitWord(clean);
if (split.size() > 1) {
splitCount++;
}
System.out.println(j + ". " + clean);
printed.add(clean);
j++;
}
}
}
System.out.println("splitCount: " + splitCount);
| 156
| 535
| 691
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/MissingGermanCompoundsFinder.java
|
MissingGermanCompoundsFinder
|
run
|
class MissingGermanCompoundsFinder {
private final GermanSpellerRule germanSpeller;
public MissingGermanCompoundsFinder() {
germanSpeller = new GermanSpellerRule(JLanguageTool.getMessageBundle(), new GermanyGerman());
}
public static void main(String[] args) throws IOException {
if (args.length != 1) {
System.out.println("Usage: " + MissingGermanCompoundsFinder.class.getSimpleName() + " <filename>");
System.exit(1);
}
String filename = args[0];
new MissingGermanCompoundsFinder().run(filename);
}
private void run(String filename) throws IOException {<FILL_FUNCTION_BODY>}
private boolean isKnownByGermanSpeller(String word) {
return !germanSpeller.isMisspelled(StringTools.uppercaseFirstChar(word)) ||
!germanSpeller.isMisspelled(StringTools.lowercaseFirstChar(word));
}
private BufferedReader getReaderForFilename(String filename) throws FileNotFoundException {
FileInputStream fis = new FileInputStream(filename);
InputStreamReader isr = new InputStreamReader(fis, StandardCharsets.UTF_8);
return new BufferedReader(isr);
}
}
|
System.out.println("# compound words not accepted by LT speller");
BufferedReader reader = getReaderForFilename(filename);
String line;
GermanWordSplitter splitter = new GermanWordSplitter(false);
Map<String,Integer> firstPartCount = new HashMap<>();
while ((line = reader.readLine()) != null) {
String word;
int count;
if (line.contains("\t")) {
count = Integer.parseInt(line.split("\t")[0]);
word = line.split("\t")[1];
} else {
count = 1;
word = line;
}
if (word.length() < 50 && StringTools.startsWithUppercase(word) && !isKnownByGermanSpeller(word)) {
List<String> wordParts = splitter.splitWord(word);
if (wordParts.size() > 1) {
String key = wordParts.get(0);
if (firstPartCount.containsKey(key)) {
firstPartCount.put(key, firstPartCount.get(key)+count);
} else {
firstPartCount.put(key, count);
}
}
}
}
for (Map.Entry<String, Integer> entry : firstPartCount.entrySet()) {
if (entry.getValue() > 0) {
boolean known = isKnownByGermanSpeller(entry.getKey() + "test");
System.out.println(entry.getValue() + " " + entry.getKey() + " " + known);
} else {
System.out.println(entry.getValue() + " " + entry.getKey());
}
}
| 337
| 426
| 763
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/MissingGermanPosFinder.java
|
MissingGermanPosFinder
|
main
|
class MissingGermanPosFinder {
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
}
|
if (args.length != 2) {
System.out.println("Usage: " + MissingGermanPosFinder.class.getSimpleName() + " <file> <ngram_dir>");
System.exit(1);
}
List<String> lines = Files.readAllLines(Paths.get(args[0]));
//List<String> lines = Arrays.asList("Bundesrepublik", "Landwirtschaft", "Perl", "Haus", "Drücke", "Wischdsda", "gut", "schönxxx");
LuceneLanguageModel lm = new LuceneLanguageModel(new File(args[1]));
GermanTagger tagger = new GermanTagger();
for (String word : lines) {
int origCount = -1;
if (word.matches("\\d+ .*")) {
String[] parts = word.split(" ");
origCount = Integer.parseInt(parts[0]);
word = parts[1];
}
word = word.trim();
if (word.endsWith(".")) {
word = word.substring(0, word.length()-1);
}
AnalyzedTokenReadings matches = tagger.lookup(word);
AnalyzedTokenReadings lcMatches = tagger.lookup(word.toLowerCase());
if ((matches == null || !matches.isTagged()) && (lcMatches == null || !lcMatches.isTagged())) {
long count = origCount == -1 ? lm.getCount(word) : origCount;
System.out.println(count + "\t" + word);
}
}
| 39
| 415
| 454
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/MissingGermanPosForms.java
|
MissingGermanPosForms
|
main
|
class MissingGermanPosForms {
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
}
|
List<String> lines = Files.readAllLines(Paths.get("/home/dnaber/lt/git/languagetool/languagetool-language-modules/de/src/main/resources/org/languagetool/resource/de/hunspell/de_DE.dic"));
//List<String> lines = Arrays.asList("Beige", "Zoom", "Perl", "Haus", "Drücke", "Wisch");
GermanTagger tagger = new GermanTagger();
for (String line : lines) {
String word = line.replaceFirst("/.*", "");
if (StringTools.startsWithUppercase(word)) {
List<TaggedWord> ucMatches = tagger.tag(word);
List<TaggedWord> lcMatches = tagger.tag(StringTools.lowercaseFirstChar(word));
//System.out.println(word + " " + ucMatches + " " + lcMatches);
if (ucMatches.size() == 0 && lcMatches.size() > 0) {
System.out.println(word + " " + lcMatches);
}
}
}
| 38
| 302
| 340
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/MissingGermanWords.java
|
MissingGermanWords
|
listMissingWords
|
class MissingGermanWords {
private final String filename;
private final boolean outputCombinedListing;
private final GermanSpellerRule germanSpeller;
private final GermanTagger germanTagger;
private final MorfologikAmericanSpellerRule englishSpeller;
public MissingGermanWords(String filename) throws IOException {
this.filename = filename;
this.outputCombinedListing = true;
germanSpeller = new GermanSpellerRule(JLanguageTool.getMessageBundle(), new GermanyGerman());
germanTagger = new GermanTagger();
englishSpeller = new MorfologikAmericanSpellerRule(JLanguageTool.getMessageBundle(), new AmericanEnglish());
}
public static void main(String[] args) throws IOException {
if (args.length != 1) {
System.out.println("Usage: " + MissingGermanWords.class.getSimpleName() + " <filename>");
System.exit(1);
}
String filename = args[0];
new MissingGermanWords(filename).run();
}
private void run() throws IOException {
if (outputCombinedListing) {
listMissingWords(filename);
} else {
listMissingWordsSpeller(filename);
listMissingWordsTagger(filename);
}
}
private void listMissingWordsSpeller(String filename) throws java.io.IOException {
System.out.println("# missing words speller");
BufferedReader reader = getReaderForFilename(filename);
String line;
while ((line = reader.readLine()) != null) {
String word = wordFromLine(line);
if (!isKnownByGermanSpeller(word) && !isKnownByEnglishSpeller(word)) {
System.out.println(line);
}
}
reader.close();
}
private void listMissingWordsTagger(String filename) throws java.io.IOException {
System.out.println("# missing words tagger");
BufferedReader reader = getReaderForFilename(filename);
String line;
while ((line = reader.readLine()) != null) {
String word = wordFromLine(line);
if (!isKnownByGermanTagger(word) && !isKnownByEnglishSpeller(word)) {
System.out.println(line);
}
}
reader.close();
}
private void listMissingWords(String filename) throws java.io.IOException {<FILL_FUNCTION_BODY>}
private boolean isKnownByGermanSpeller(String word) {
return !germanSpeller.isMisspelled(StringTools.uppercaseFirstChar(word)) ||
!germanSpeller.isMisspelled(StringTools.lowercaseFirstChar(word));
}
private boolean isKnownByGermanTagger(String word) throws IOException {
return germanTagger.lookup(StringTools.uppercaseFirstChar(word)) != null ||
germanTagger.lookup(StringTools.lowercaseFirstChar(word)) != null;
}
private boolean isKnownByEnglishSpeller(String word) throws IOException {
return !englishSpeller.isMisspelled(StringTools.uppercaseFirstChar(word)) ||
!englishSpeller.isMisspelled(StringTools.lowercaseFirstChar(word));
}
private BufferedReader getReaderForFilename(String filename) throws FileNotFoundException {
FileInputStream fis = new FileInputStream(filename);
InputStreamReader isr = new InputStreamReader(fis, StandardCharsets.UTF_8);
return new BufferedReader(isr);
}
private String wordFromLine(String line) {
return line.split(",")[0];
}
}
|
BufferedReader reader = getReaderForFilename(filename);
String line;
while ((line = reader.readLine()) != null) {
String word = wordFromLine(line);
boolean knownBySpeller = isKnownByGermanSpeller(word);
boolean knownByTagger = isKnownByGermanTagger(word);
if ((!knownBySpeller || !knownByTagger) && !isKnownByEnglishSpeller(word)) {
System.out.print(line);
System.out.print(",");
if (!knownBySpeller && !knownByTagger) {
System.out.println("speller+tagger");
} else if (!knownBySpeller) {
System.out.println("speller");
} else {
System.out.println("tagger");
}
}
}
reader.close();
| 954
| 221
| 1,175
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/MissingPortuguesePosFinder.java
|
MissingPortuguesePosFinder
|
getOccurrences
|
class MissingPortuguesePosFinder {
public static void main(String[] args) throws IOException {
if (args.length != 2) {
System.out.println("Usage: " + MissingPortuguesePosFinder.class.getSimpleName() + " <file> <gaia_file>");
System.out.println(" <gaia_file> is e.g. pt_br_wordlist.xml from https://github.com/mozilla-b2g/gaia/tree/master/apps/keyboard/js/imes/latin/dictionaries");
System.exit(1);
}
Map<String, Integer> occ = getOccurrences(new File(args[1]));
List<String> lines = Files.readAllLines(Paths.get(args[0]));
//List<String> lines = Arrays.asList("DC");
PortugueseTagger tagger = new PortugueseTagger();
for (String word : lines) {
int origCount = -1;
if (word.matches("\\d+ .*")) {
String[] parts = word.split(" ");
origCount = Integer.parseInt(parts[0]);
word = parts[1];
}
word = word.trim();
if (word.endsWith(".")) {
word = word.substring(0, word.length()-1);
}
List<AnalyzedTokenReadings> matches = tagger.tag(Collections.singletonList(word));
List<AnalyzedTokenReadings> lcMatches = tagger.tag(Collections.singletonList(word.toLowerCase()));
if (matches.size() == 1 && noTag(matches.get(0)) && lcMatches.size() == 1 && noTag(lcMatches.get(0))) {
if (occ.containsKey(word)) {
long count = origCount == -1 ? occ.get(word) : origCount;
System.out.println(count + "\t" + word);
}
}
}
}
private static Map<String,Integer> getOccurrences(File gaiaXmlFile) throws IOException {<FILL_FUNCTION_BODY>}
private static boolean noTag(AnalyzedTokenReadings atr) {
return !atr.isTagged();
}
}
|
List<String> lines = Files.readAllLines(gaiaXmlFile.toPath());
Map<String,Integer> map = new HashMap<>();
Pattern p = Pattern.compile("<w f=\"(\\d+)\" flags=\".*?\">(.*?)</w>");
for (String line : lines) {
line = line.trim();
if (line.startsWith("<w ")) {
Matcher matcher = p.matcher(line);
if (matcher.matches()) {
int occ = Integer.parseInt(matcher.group(1));
String word = matcher.group(2);
//System.out.println(occ + " " + word);
map.put(word, occ);
} else {
System.out.println("Skipping line, doesn't match regex: " + line);
}
} else {
System.out.println("Skipping line: " + line);
}
}
return map;
| 577
| 254
| 831
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/MissingRussianPosFinder.java
|
MissingRussianPosFinder
|
main
|
class MissingRussianPosFinder {
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
private static boolean noTag(AnalyzedTokenReadings atr) {
return !atr.isTagged();
}
}
|
if (args.length != 1) {
System.out.println("Usage: " + MissingRussianPosFinder.class.getSimpleName() + " <file> ");
System.exit(1);
}
List<String> lines = Files.readAllLines(Paths.get(args[0]));
RussianTagger tagger = new RussianTagger();
for (String word : lines) {
int origCount = -1;
if (word.matches("\\d+ .*")) {
String[] parts = word.split(" ");
origCount = Integer.parseInt(parts[0]);
word = parts[1];
}
word = word.trim();
if (word.endsWith(".")) {
word = word.substring(0, word.length()-1);
}
List<AnalyzedTokenReadings> matches = tagger.tag(Collections.singletonList(word));
List<AnalyzedTokenReadings> lcMatches = tagger.tag(Collections.singletonList(word.toLowerCase()));
if (matches.size() == 1 && noTag(matches.get(0)) && lcMatches.size() == 1 && noTag(lcMatches.get(0))) {
System.out.println( word);
}
}
| 69
| 325
| 394
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/NGramLangIdentifierPerformanceTest.java
|
NGramLangIdentifierPerformanceTest
|
testPerformance
|
class NGramLangIdentifierPerformanceTest {
private static final File ngramZip = new File("/home/languagetool/ngram-lang-id/model_ml50_new.zip");
private static final Path input = Paths.get("/home/dnaber/data/corpus/tatoeba/20191014/sentences_shuf.txt");
private static final int limit = 10_000;
public void testPerformance() throws IOException {<FILL_FUNCTION_BODY>}
public static void main(String[] args) throws IOException {
new NGramLangIdentifierPerformanceTest().testPerformance();
}
}
|
System.out.println("Loading ngrams...");
NGramDetector ngram = new NGramDetector(ngramZip, 50);
System.out.println("Loaded.");
int i = 0;
double totalMillis = 0;
long totalLength = 0;
long totalRuns = 0;
System.out.println("Loading input...");
List<String> lines = Files.readAllLines(input);
System.out.println("Loaded " + lines.size() + " lines.");
for (String line : lines) {
long startTime = System.nanoTime();
Map<String, Double> detectLanguages = ngram.detectLanguages(line, null);
long endTime = System.nanoTime();
double runTimeMillis = (endTime - startTime) / 1000.0f / 1000.0f;
if (i > 10) {
totalMillis += runTimeMillis;
totalLength += line.length();
totalRuns++;
//System.out.println(line.length() + " chars took " + runTimeMillis + "ms -> " + detectLanguages);
//System.out.println(line.length() + " chars took " + runTimeMillis + "ms");
if (runTimeMillis > 5) {
System.out.println(line.length() + " chars took " + runTimeMillis + "ms for text: " + line);
}
} else {
System.out.println("Skipping early run " + i);
}
if (i > limit) {
System.out.println("Stopping test at limit " + limit);
break;
}
i++;
}
System.out.println("Runs: " + totalRuns);
System.out.printf(Locale.ENGLISH, "Avg. length: %.2f chars\n", (double)totalLength/totalRuns);
System.out.printf(Locale.ENGLISH, "Avg: %.2fms\n", totalMillis/totalRuns);
| 175
| 533
| 708
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/NoSuggestionRuleList.java
|
NoSuggestionRuleList
|
main
|
class NoSuggestionRuleList {
// format: rule_id,match_propability_as_float (and optionally more columns)
// e.g.:
// MORFOLOGIK_RULE_NL_NL,0.596809797834639
// EINDE_ZIN_ONVERWACHT,0.2137227907162415
private final static String POPULARITY_FILE = "/home/dnaber/Downloads/rule_matches_nl_1w_detailed.csv"; // set to null to skip
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
private static void printRule(String id, Rule rule, String incorrectExample, Map<String, Float> popularity) {
Float pop = popularity.get(rule.getId());
if (pop != null) {
System.out.printf(Locale.ENGLISH, "%.4f " + id + "\n", pop);
} else {
System.out.println("0 " + id);
}
//System.out.println("--> "+incorrectExample);
}
}
|
if (args.length < 1) {
System.out.println("Usage: " + NoSuggestionRuleList.class.getSimpleName() + " <langCodes>");
System.exit(1);
}
Map<String,Float> popularity = new HashMap<>();
if (POPULARITY_FILE != null) {
List<String> lines = Files.readAllLines(Paths.get(POPULARITY_FILE));
for (String line : lines) {
String[] parts = line.split(",");
try {
popularity.put(parts[0], Float.parseFloat(parts[1]));
} catch (NumberFormatException e) {
System.err.println("Ignoring line: " + line + ", " + e.getMessage());
}
}
}
System.out.println("Loaded " + popularity.size() + " popularity mappings");
for (String langCode : args) {
Language lang = Languages.getLanguageForShortCode(langCode);
JLanguageTool lt = new JLanguageTool(lang);
for (Rule rule : lt.getAllActiveRules()) {
lt.disableRule(rule.getId());
}
int suggestion = 0;
int noSuggestion = 0;
for (Rule rule : lt.getAllRules()) {
if (rule.isDefaultOff()) {
continue;
}
List<IncorrectExample> incorrectExamples = rule.getIncorrectExamples();
if (incorrectExamples.isEmpty()) {
//System.err.println("Skipping " + rule.getId() + " (no example)");
continue;
}
String incorrectExample = incorrectExamples.get(0).getExample().replaceAll("<marker>", "").replaceAll("</marker>", "");
lt.enableRule(rule.getId());
List<RuleMatch> matches = lt.check(incorrectExample);
for (RuleMatch match : matches) {
if (match.getSuggestedReplacements().isEmpty()) {
//if (rule instanceof AbstractPatternRule) {
// printRule(((AbstractPatternRule)rule).getFullId(), rule, incorrectExample, popularity);
//} else {
// printRule(rule.getId(), rule, incorrectExample, popularity);
//}
printRule(rule.getId(), rule, incorrectExample, popularity);
noSuggestion++;
} else {
suggestion++;
}
break;
}
lt.disableRule(rule.getId());
}
System.out.println(lang + ":");
System.out.printf("With suggestion : %d\n", suggestion);
System.out.printf("Without suggestion: %d (%.2f%%)\n", noSuggestion, ((float)noSuggestion / (suggestion + noSuggestion))*100.0);
System.out.println();
}
| 308
| 734
| 1,042
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/ProhibitComparator.java
|
ProhibitComparator
|
main
|
class ProhibitComparator {
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
private static boolean hasPrefixLine(String word, List<String> expanded2) {
for (String line : expanded2) {
if (line.endsWith(".*") && word.startsWith(line.substring(0, line.length()-2))) {
//System.out.println("!!"+line.substring(0, line.length()-2));
return true;
}
}
return false;
}
@NotNull
private static List<String> getExpandedLines(String filename) throws IOException {
LineExpander expander = new LineExpander();
List<String> lines = Files.readAllLines(Paths.get(filename));
List<String> expanded = new ArrayList<>();
for (String line : lines) {
if (line.startsWith("#")) {
continue;
}
expanded.addAll(expander.expandLine(line));
}
return expanded;
}
}
|
if (args.length != 2) {
System.out.println("Usage: " + ProhibitComparator.class.getName() + " <oldFile> <newFile>");
System.exit(1);
}
List<String> expanded1 = getExpandedLines(args[0]);
List<String> expanded2 = getExpandedLines(args[1]);
System.out.println("Words removed in " + args[1] + ":");
System.out.println("*** NOTE: result might not be accurate for words with '.*'");
for (String word : expanded1) {
if (!expanded2.contains(word) && !hasPrefixLine(word, expanded2)) {
System.out.println(word);
}
}
| 273
| 195
| 468
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/RegexExpander.java
|
RegexExpander
|
main
|
class RegexExpander {
private final static List<String> entities = Arrays.asList(
" <!ENTITY conjuncoes_coordenativas \"(?:e(?:ntão|ntretanto)?|p(?:ois|orém|or(?:t|qu)anto)|mas|ou|nem|contudo|logo|todavia)\">");
private final static String wordListFile = "/home/dnaber/lt/pt-words.txt";
private final static Set<String> printed = new HashSet<>();
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
private static void printToken(int i, String s) {
if (i == 0) {
System.out.print(s);
} else {
System.out.print("|" + s);
}
printed.add(s);
}
}
|
//Pattern tempP = Pattern.compile("(?:e(?:stere|ur|g)|(?:cent|sac)r|a(?:str|udi)|t(?:erm|urb)|f(?:il|ot)|i(?:ntr|d)|bronc|labi|mon|vas|zo)o");
//System.out.println(tempP.matcher("estereo").matches());
//System.exit(0);
List<String> lines = Files.readAllLines(Paths.get(wordListFile));
for (String s : entities) {
Matcher matcher = Pattern.compile("<!ENTITY (.*?) ").matcher(s);
boolean found = matcher.find();
if (!found) {
System.out.println("Entity name not found: " + s);
}
String entityName = matcher.group(1);
s = s.replaceFirst("<!ENTITY .*? \"(.*)\">", "$1").trim();
System.out.print("<!ENTITY " + entityName + " \"");
Pattern p = Pattern.compile(s);
int i = 0;
for (String line : lines) {
line = line.trim();
boolean lcMatch = false;
boolean ucMatch = false;
if (p.matcher(line).matches()) {
lcMatch = true;
}
if (StringTools.startsWithLowercase(line) && p.matcher(uppercaseFirstChar(line)).matches()) {
ucMatch = true;
}
if (lcMatch && ucMatch) {
printToken(i, "[" + uppercaseFirstChar(line).charAt(0) + StringTools.lowercaseFirstChar(line).charAt(0) + "]" + line.substring(1));
i++;
} else if (lcMatch && !printed.contains(line)) {
printToken(i, line);
i++;
} else if (ucMatch && !printed.contains(uppercaseFirstChar(line))) {
printToken(i, uppercaseFirstChar(line));
i++;
}
}
System.out.println("\">");
}
| 235
| 562
| 797
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/RuleActivityOverview.java
|
RuleActivityOverview
|
run
|
class RuleActivityOverview {
private static final int PAST_DAYS = 365/2;
RuleActivityOverview() {
}
private void run() {<FILL_FUNCTION_BODY>}
int getActivityFor(Language lang, int pastDays) {
try {
Calendar past = GregorianCalendar.getInstance();
past.add(Calendar.DAY_OF_MONTH, -pastDays);
SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
String pastString = dateFormat.format(past.getTime());
String langCode = lang.getShortCode();
List<File> xmlFiles = getAllXmlFiles(lang, langCode);
int commits = 0;
for (File file : xmlFiles) {
if (!file.getName().contains("-test-") && !file.exists()) {
throw new RuntimeException("Not found: " + file);
}
String command = "git log --after=" + pastString + " " + file;
Runtime runtime = Runtime.getRuntime();
Process process = runtime.exec(command);
InputStream inputStream = process.getInputStream();
String output = StringTools.readStream(inputStream, "utf-8");
process.waitFor();
commits += getCommits(output);
}
return commits;
} catch (Exception e) {
throw new RuntimeException(e);
}
}
private List<File> getAllXmlFiles(Language lang, String langCode) {
List<File> files = new ArrayList<>();
List<String> ruleFileNames = lang.getRuleFileNames();
for (String ruleFileName : ruleFileNames) {
files.add(new File("../languagetool-language-modules/" + langCode + "/src/main/resources/" + ruleFileName));
}
File disambiguationFile = new File("../languagetool-language-modules/" + langCode +
"/src/main/resources/org/languagetool/resource/" + langCode + "/disambiguation.xml");
if (disambiguationFile.exists()) {
files.add(disambiguationFile);
}
return files;
}
private int getCommits(String svnOutput) {
int count = 0;
try (Scanner scanner = new Scanner(svnOutput)) {
while (scanner.hasNextLine()) {
String line = scanner.nextLine();
if (line.startsWith("commit ")) {
count++;
}
}
}
return count;
}
public static void main(String[] args) throws Exception {
RuleActivityOverview prg = new RuleActivityOverview();
prg.run();
}
}
|
System.out.println("Commits per language in the last " + PAST_DAYS + " days");
System.out.println("Date: " + new SimpleDateFormat("yyyy-MM-dd").format(new Date()));
List<String> sortedLanguages = new ArrayList<>();
for (Language element : Languages.get()) {
sortedLanguages.add(element.getName());
}
Collections.sort(sortedLanguages);
for (String langName : sortedLanguages) {
Language lang = Languages.getLanguageForName(langName);
int commits = getActivityFor(lang, PAST_DAYS);
System.out.println(commits + "\t" + lang.getName() + (lang.isVariant() ? " (including the parent language)" : ""));
}
| 693
| 202
| 895
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/RuleDetails.java
|
RuleDetails
|
main
|
class RuleDetails {
private final List<Rule> rules;
public RuleDetails(Language lang, @Nullable String ngramPath) throws IOException {
JLanguageTool lt = new JLanguageTool(lang);
if (ngramPath != null) {
lt.activateLanguageModelRules(new File(ngramPath));
}
rules = lt.getAllRules();
}
@Nullable
private Rule getRuleById(String ruleId) {
return rules.stream()
.filter(r -> r.getId().equals(ruleId))
.findFirst().orElse(null);
}
public static void main(String[] args) throws ParseException, IOException {<FILL_FUNCTION_BODY>}
}
|
Options options = new Options();
options.addRequiredOption("l", "language", true, "Language for rules");
options.addRequiredOption("f", "file", true, "Input file");
options.addRequiredOption("o", "output", true, "Output file");
options.addRequiredOption("c", "column", true, "Column in input file");
options.addOption("n", "ngramPath", true, "Ngram path to activate ngram rules");
CommandLine cmd = new DefaultParser().parse(options, args);
String langCode = cmd.getOptionValue('l');
String inputFile = cmd.getOptionValue('f');
String outputFile = cmd.getOptionValue('o');
String column = cmd.getOptionValue('c');
String ngramPath = cmd.hasOption('n') ? cmd.getOptionValue('n') : null;
RuleDetails details = new RuleDetails(Languages.getLanguageForShortCode(langCode), ngramPath);
CSVFormat format = CSVFormat.RFC4180.withFirstRecordAsHeader();
try (CSVParser parser = CSVParser.parse(new File(inputFile), Charset.defaultCharset(), format)) {
try (CSVPrinter printer = new CSVPrinter(new BufferedWriter(new FileWriter(outputFile)), format)) {
Map<String, Integer> oldHeader = parser.getHeaderMap();
List<String> newHeader = new ArrayList<>(Collections.nCopies(oldHeader.size(), null));
for (Map.Entry<String, Integer> entry : oldHeader.entrySet()) {
newHeader.set(entry.getValue(), entry.getKey());
}
newHeader.add("description");
newHeader.add("category");
printer.printRecord(newHeader);
if (!oldHeader.containsKey(column)) {
throw new RuntimeException("Input file does not contain specified column " + column);
}
List<CSVRecord> records = parser.getRecords();
records.stream().sequential().map(record -> {
String ruleId = record.get(column);
Rule rule = details.getRuleById(ruleId);
List<String> transformedValues = new ArrayList<>();
record.iterator().forEachRemaining(transformedValues::add);
if (rule == null) {
transformedValues.add("");
transformedValues.add("");
} else {
transformedValues.add(rule.getDescription());
transformedValues.add(rule.getCategory().getId().toString());
}
return transformedValues;
}).forEachOrdered(values -> {
try {
printer.printRecord(values);
} catch (IOException e) {
throw new RuntimeException(e);
}
});
}
}
| 190
| 690
| 880
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/SentenceChecker.java
|
SentenceChecker
|
run
|
class SentenceChecker {
private static final int BATCH_SIZE = 1000;
private void run(Language language, File file) throws IOException {<FILL_FUNCTION_BODY>}
public static void main(String[] args) throws IOException {
if (args.length != 2) {
System.err.println("Usage: " + SentenceChecker.class.getSimpleName() + " <langCode> <sentenceFile>");
System.exit(1);
}
SentenceChecker checker = new SentenceChecker();
checker.run(Languages.getLanguageForShortCode(args[0]), new File(args[1]));
}
}
|
JLanguageTool lt = new JLanguageTool(language);
try (Scanner scanner = new Scanner(file)) {
int count = 0;
long startTime = System.currentTimeMillis();
while (scanner.hasNextLine()) {
String line = scanner.nextLine();
lt.check(line);
if (++count % BATCH_SIZE == 0) {
long time = System.currentTimeMillis() - startTime;
System.out.println(count + ". " + time + "ms per " + BATCH_SIZE + " sentences");
startTime = System.currentTimeMillis();
}
}
}
| 174
| 166
| 340
|
<no_super_class>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.