proj_name
stringclasses 131
values | relative_path
stringlengths 30
228
| class_name
stringlengths 1
68
| func_name
stringlengths 1
48
| masked_class
stringlengths 78
9.82k
| func_body
stringlengths 46
9.61k
| len_input
int64 29
2.01k
| len_output
int64 14
1.94k
| total
int64 55
2.05k
| relevant_context
stringlengths 0
38.4k
|
|---|---|---|---|---|---|---|---|---|---|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/SentenceSplitter.java
|
SentenceSplitter
|
run
|
class SentenceSplitter {
private static final int BATCH_SIZE = 1000;
private void run(Language language, File inputFile, PrintWriter outputFile) throws IOException {<FILL_FUNCTION_BODY>}
public static void main(String[] args) throws IOException {
if (args.length != 3) {
System.err.println(
"Usage: " + SentenceSplitter.class.getSimpleName() + " <langCode> <inputFile or folderFile> <outputFile>");
System.exit(1);
}
SentenceSplitter splitter = new SentenceSplitter();
PrintWriter output = new PrintWriter(args[2]);
File folder = new File(args[1]);
if (folder.isFile()) {
splitter.run(Languages.getLanguageForShortCode(args[0]), folder, output);
}
if (folder.isDirectory()) {
for (final File fileEntry : folder.listFiles()) {
if (fileEntry.isFile()) {
splitter.run(Languages.getLanguageForShortCode(args[0]), fileEntry, output);
}
}
}
output.close();
}
}
|
JLanguageTool lt = new JLanguageTool(language);
try (Scanner scanner = new Scanner(inputFile)) {
int count = 0;
long startTime = System.currentTimeMillis();
while (scanner.hasNextLine()) {
String line = scanner.nextLine();
List<String> sentences = lt.sentenceTokenize(line);
for (String sentence : sentences) {
String cleanSentence = sentence.trim();
if (!cleanSentence.isEmpty()) {
outputFile.println(cleanSentence);
}
}
if (++count % BATCH_SIZE == 0) {
long time = System.currentTimeMillis() - startTime;
System.out.println(count + ". " + time + "ms per " + BATCH_SIZE + " sentences");
startTime = System.currentTimeMillis();
}
}
}
| 299
| 225
| 524
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/UnknownWordFinder.java
|
UnknownWordFinder
|
getSpellingCheckRule
|
class UnknownWordFinder {
private final Map<String,Integer> unknownWords = new HashMap<>();
private final Set<String> unknownSpelling = new HashSet<>();
private final Set<String> unknownTag = new HashSet<>();
private void run(File dir, JLanguageTool lt) throws IOException {
SpellingCheckRule spellerRule = getSpellingCheckRule(lt);
Tagger tagger = lt.getLanguage().getTagger();
List<Path> files = Files.walk(dir.toPath()).filter(Files::isRegularFile).collect(Collectors.toList());
for (Path file : files) {
handle(file, lt, spellerRule, tagger);
}
printResult(unknownWords);
}
@NotNull
private SpellingCheckRule getSpellingCheckRule(JLanguageTool lt) {<FILL_FUNCTION_BODY>}
private void handle(Path f, JLanguageTool lt, SpellingCheckRule rule, Tagger tagger) throws IOException {
String text = null;
if (f.toString().toLowerCase().endsWith(".txt")) {
List<String> lines = Files.readAllLines(f);
text = String.join(" ", lines);
} else if (f.toString().toLowerCase().endsWith(".rtf")) {
text = getTextFromRtf(f);
} else {
System.out.println("Ignoring " + f + ": unknown suffix");
}
if (text != null) {
System.out.println("Working on " + f);
List<AnalyzedSentence> analyzedSentences = lt.analyzeText(text);
for (AnalyzedSentence analyzedSentence : analyzedSentences) {
AnalyzedTokenReadings[] tokens = analyzedSentence.getTokensWithoutWhitespace();
for (AnalyzedTokenReadings token : tokens) {
String t = token.getToken();
boolean misspelled = !t.matches("[\\d%$]+") && rule.isMisspelled(t);
if (misspelled) {
unknownSpelling.add(t);
}
List<AnalyzedTokenReadings> tags = tagger.tag(Collections.singletonList(t));
boolean noTag = tags.size() == 1 && !tags.get(0).isTagged() && !t.matches("[\\d%$]+");
if (noTag) {
unknownTag.add(t);
}
if (misspelled || noTag) {
if (unknownWords.containsKey(t)) {
unknownWords.put(t, unknownWords.get(t) + 1);
} else {
unknownWords.put(t, 1);
}
}
}
}
}
}
private String getTextFromRtf(Path f) throws IOException {
JEditorPane p = new JEditorPane();
p.setContentType("text/rtf");
EditorKit rtfKit = p.getEditorKitForContentType("text/rtf");
try {
rtfKit.read(new FileReader(f.toFile()), p.getDocument(), 0);
Writer writer = new StringWriter();
EditorKit txtKit = p.getEditorKitForContentType("text/plain");
txtKit.write(writer, p.getDocument(), 0, p.getDocument().getLength());
return writer.toString();
} catch (BadLocationException e) {
System.err.println("Problem running on " + f + ": " + e.getMessage());
return null;
}
}
private void printResult(Map<String, Integer> unknownWords) {
List<CountedWord> countedWords = new ArrayList<>();
for (Map.Entry<String, Integer> entry : unknownWords.entrySet()) {
countedWords.add(new CountedWord(entry.getKey(), entry.getValue()));
}
Collections.sort(countedWords);
System.out.println("== RESULT ==");
System.out.println("count\tterm\tunknownSpelling\tunknownTag");
for (CountedWord countedWord : countedWords) {
String t = countedWord.word;
System.out.println(countedWord.count + "\t" + t + "\t" + unknownSpelling.contains(t) + "\t" + unknownTag.contains(t));
}
}
static class CountedWord implements Comparable<CountedWord> {
int count;
String word;
CountedWord(String key, Integer value) {
word = key;
count = value;
}
@Override
public int compareTo(@NotNull CountedWord countedWord) {
return Integer.compare(countedWord.count, count);
}
}
public static void main(String[] args) throws IOException {
if (args.length != 2) {
System.out.println("Usage: " + UnknownWordFinder.class.getSimpleName() + " <langCode> <dir>");
System.exit(1);
}
JLanguageTool lt = new JLanguageTool(Languages.getLanguageForShortCode(args[0]));
new UnknownWordFinder().run(new File(args[1]), lt);
}
}
|
SpellingCheckRule spellerRule = null;
for (Rule rule : lt.getAllActiveRules()) {
if (rule.isDictionaryBasedSpellingRule()) {
if (spellerRule != null) {
throw new RuntimeException("Found more than one spell rule: " + rule + ", " + spellerRule);
}
spellerRule = (SpellingCheckRule) rule;
}
}
if (spellerRule == null) {
throw new RuntimeException("No speller rule found for " + lt.getLanguage());
}
return spellerRule;
| 1,325
| 148
| 1,473
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/UnknownWordFinderForCsv.java
|
UnknownWordFinderForCsv
|
main
|
class UnknownWordFinderForCsv {
private void run(File dir, JLanguageTool lt) throws IOException {
SpellingCheckRule spellerRule = getSpellingCheckRule(lt);
List<Path> files = Files.walk(dir.toPath()).filter(Files::isRegularFile).collect(Collectors.toList());
for (Path file : files) {
handle(file, spellerRule);
}
}
@NotNull
private SpellingCheckRule getSpellingCheckRule(JLanguageTool lt) {
SpellingCheckRule spellerRule = null;
for (Rule rule : lt.getAllActiveRules()) {
if (rule.isDictionaryBasedSpellingRule()) {
if (spellerRule != null) {
throw new RuntimeException("Found more than one spell rule: " + rule + ", " + spellerRule);
}
spellerRule = (SpellingCheckRule) rule;
}
}
if (spellerRule == null) {
throw new RuntimeException("No speller rule found for " + lt.getLanguage());
}
return spellerRule;
}
private void handle(Path f, SpellingCheckRule rule) throws IOException {
int i = 0;
if (f.toString().toLowerCase().endsWith(".csv")) {
List<String> lines = Files.readAllLines(f);
for (String line : lines) {
String[] parts = line.split(",");
String word = parts[0].replace("\"", "");
if (rule.isMisspelled(word)) {
System.out.println(line);
i++;
}
}
} else {
System.out.println("Ignoring " + f + ": unknown suffix");
}
System.out.println("Lines printed: " + i);
}
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
}
|
if (args.length != 2) {
System.out.println("Usage: " + UnknownWordFinderForCsv.class.getSimpleName() + " <langCode> <dir>");
System.exit(1);
}
JLanguageTool lt = new JLanguageTool(Languages.getLanguageForShortCode(args[0]));
new UnknownWordFinderForCsv().run(new File(args[1]), lt);
| 489
| 112
| 601
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/XmlIndenter.java
|
XmlIndenter
|
main
|
class XmlIndenter {
private static final int INDENT = 2;
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
}
|
if (args.length != 1) {
System.out.println("Usage: " + XmlIndenter.class.getSimpleName() + " <xmlFile>");
System.exit(1);
}
List<String> lines = Files.readAllLines(Paths.get(args[0]), StandardCharsets.UTF_8);
boolean inCategory = false;
boolean inRuleGroup = false;
boolean inRule = false;
boolean inAntiPattern = false;
boolean inPattern = false;
boolean inMarker = false;
boolean inAnd = false;
boolean inUnify = false;
boolean inUnifyIgnore = false;
boolean inToken = false;
for (String line : lines) {
String origLine = line;
line = line.trim();
if (line.contains("</marker>") && (inPattern || inAntiPattern)) { inMarker = false; }
if (line.startsWith("</antipattern>")) { inAntiPattern = false; }
if (line.startsWith("</pattern>")) { inPattern = false; }
if (line.startsWith("</rule>")) { inRule = false; }
if (line.startsWith("</rulegroup")) { inRuleGroup = false; }
if (line.startsWith("</category")) { inCategory = false; }
if (line.startsWith("</and")) { inAnd = false; }
if (line.startsWith("</unify>")) { inUnify = false; }
if (line.startsWith("</unify-ignore>")) { inUnifyIgnore = false; }
int level = INDENT + (inCategory ? INDENT : 0) + (inRuleGroup ? INDENT : 0) + (inRule ? INDENT : 0) +
(inPattern ? INDENT : 0) + (inAntiPattern ? INDENT : 0) + (inMarker ? INDENT : 0) + (inToken ? INDENT : 0) +
(inAnd ? INDENT : 0) + (inUnify ? INDENT : 0) + (inUnifyIgnore ? INDENT : 0);
if (line.startsWith("<category") || line.startsWith("</category")) {
level = INDENT;
}
if (line.equals("</token>")) {
level -= INDENT;
}
String indentSpaces = StringUtils.repeat(' ', level);
if (!line.isEmpty() && (inCategory || line.startsWith("<category") || line.startsWith("</category"))) {
System.out.println(indentSpaces + line);
} else {
System.out.println(origLine);
}
if (line.startsWith("<category")) { inCategory = true; }
if (line.startsWith("<rulegroup")) { inRuleGroup = true; }
if (line.startsWith("<rule ") || line.startsWith("<rule>")) { inRule = true; }
if (line.startsWith("<pattern")) { inPattern = true; }
if (line.startsWith("<antipattern") && !line.contains("</antipattern")) { inAntiPattern = true; }
if (line.contains("<marker>") && !line.contains("</marker>") && (inPattern || inAntiPattern)) { inMarker = true; }
if (line.contains("<and>")) { inAnd = true; }
if (line.contains("<unify>") || line.contains("<unify ")) { inUnify = true; }
if (line.contains("<unify-ignore>")) { inUnifyIgnore = true; }
if (line.contains("</token>") || (line.contains("<token") && line.contains("/>")) && (inPattern || inAntiPattern)) { inToken = false; }
if (line.contains("<token") && !line.contains("/>") && !line.contains("</token>") && (inPattern || inAntiPattern)) {
inToken = true;
}
}
| 49
| 1,052
| 1,101
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/XmlUsageCounter.java
|
XmlUsageCounter
|
add
|
class XmlUsageCounter {
private final Map<String,Integer> map = new HashMap<>();
private void countElementsAndAttributes(InputStream in) throws XMLStreamException {
XMLInputFactory inputFactory = XMLInputFactory.newInstance();
XMLEventReader eventReader = inputFactory.createXMLEventReader(in);
while (eventReader.hasNext()) {
XMLEvent event = eventReader.nextEvent();
if (event.isStartElement()) {
String elementName = event.asStartElement().getName().getLocalPart();
add(elementName);
Iterator attributes = event.asStartElement().getAttributes();
while (attributes.hasNext()) {
Attribute att = (Attribute) attributes.next();
add(elementName + "/" + att.getName());
}
}
}
}
private void add(String name) {<FILL_FUNCTION_BODY>}
private void printResult() {
List<ElemCount> elemCounts = new ArrayList<>();
for (Map.Entry<String, Integer> entry : map.entrySet()) {
elemCounts.add(new ElemCount(entry.getKey(), entry.getValue()));
}
Collections.sort(elemCounts, (ec1, ec2) -> ec2.count - ec1.count);
for (ElemCount elemCount : elemCounts) {
System.out.println(elemCount.count + " " + elemCount.elem);
}
}
public static void main(String[] args) throws XMLStreamException {
XmlUsageCounter counter = new XmlUsageCounter();
Set<String> countedFiles = new HashSet<>();
for (Language language : Languages.get()) {
List<String> ruleFileNames = language.getRuleFileNames();
//comment in this to count disambiguation files instead:
//List<String> ruleFileNames = Collections.singletonList(ResourceDataBroker.RESOURCE_DIR + "/" +
// language.getShortCode() + "/" + "disambiguation.xml");
for (String ruleFileName : ruleFileNames) {
if (countedFiles.contains(ruleFileName)) {
continue;
}
System.err.println("Counting elements for " + ruleFileName);
InputStream ruleStream = XmlUsageCounter.class.getResourceAsStream(ruleFileName);
if (ruleStream == null) {
System.err.println("Not found, ignoring: " + ruleFileName);
continue;
}
counter.countElementsAndAttributes(ruleStream);
countedFiles.add(ruleFileName);
}
}
counter.printResult();
}
static class ElemCount {
String elem;
Integer count;
ElemCount(String elem, Integer count) {
this.elem = elem;
this.count = count;
}
}
}
|
if (map.containsKey(name)) {
int oldCount = map.get(name);
map.put(name, oldCount+1);
} else {
map.put(name, 1);
}
| 717
| 59
| 776
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/archive/ExampleSentenceCorrectionCreator.java
|
ExampleSentenceCorrectionCreator
|
addAttribute
|
class ExampleSentenceCorrectionCreator {
private int addedCorrectionsCount = 0;
private void run(Language lang) throws IOException {
File basePath = new File("/lt/git/languagetool/languagetool-language-modules");
if (!basePath.exists()) {
throw new RuntimeException("basePath does not exist: " + basePath);
}
String langCode = lang.getShortCode();
File xml = new File(basePath, "/" + langCode + "/src/main/resources/org/languagetool/rules/" + langCode + "/grammar.xml");
List<String> xmlLines = IOUtils.readLines(new FileReader(xml));
JLanguageTool tool = new JLanguageTool(lang);
for (Rule rule : tool.getAllRules()) {
if (!(rule instanceof PatternRule)) {
continue;
}
List<IncorrectExample> incorrectExamples = rule.getIncorrectExamples();
for (IncorrectExample incorrectExample : incorrectExamples) {
checkCorrections(rule, incorrectExample, xmlLines, tool);
}
}
System.err.println("Added corrections: " + addedCorrectionsCount);
for (String xmlLine : xmlLines) {
System.out.println(xmlLine);
}
}
private void checkCorrections(Rule rule, IncorrectExample incorrectExample, List<String> xmlLines, JLanguageTool tool) throws IOException {
List<String> corrections = incorrectExample.getCorrections();
if (corrections.isEmpty()) {
for (Rule r : tool.getAllActiveRules()) {
tool.disableRule(r.getId());
}
tool.enableRule(rule.getId());
String incorrectSentence = incorrectExample.getExample().replaceAll("</?marker>", "");
List<RuleMatch> matches = tool.check(incorrectSentence);
System.err.println("no corrections: " + rule.getId() + ", " + matches.size() + " matches");
if (matches.isEmpty()) {
throw new RuntimeException("Got no rule match: " + incorrectSentence);
}
List<String> suggestedReplacements = matches.get(0).getSuggestedReplacements();
String newAttribute = "correction=\"" + String.join("|", suggestedReplacements) + "\"";
addAttribute(rule, newAttribute, xmlLines);
}
}
// Note: this is a bad hack, we just iterate through the file's lines
private void addAttribute(Rule rule, String newAttribute, List<String> xmlLines) {<FILL_FUNCTION_BODY>}
public static void main(String[] args) throws IOException {
ExampleSentenceCorrectionCreator prg = new ExampleSentenceCorrectionCreator();
prg.run(Languages.getLanguageForShortCode("de"));
}
}
|
List<Integer> linesToModify = new ArrayList<>();
String currentRuleId = null;
Pattern pattern = Pattern.compile(".*id=[\"'](.*?)[\"'].*");
String expectedSubId = ((AbstractPatternRule) rule).getSubId();
int lineCount = 0;
int subRuleCount = 0;
int modifyCount = 0;
boolean inRuleGroup = false;
for (String xmlLine : xmlLines) {
if (xmlLine.contains("<rulegroup")) {
subRuleCount = 0;
inRuleGroup = true;
} else if (xmlLine.contains("</rulegroup>")) {
subRuleCount = 0;
inRuleGroup = false;
} else if ((xmlLine.contains("<rule ")||xmlLine.contains("<rule>")) && inRuleGroup) {
subRuleCount++;
}
Matcher m = pattern.matcher(xmlLine);
if (m.matches()) {
currentRuleId = m.group(1);
}
if (xmlLine.contains("type=\"incorrect\"") || xmlLine.contains("type='incorrect'")) {
if (currentRuleId != null && !currentRuleId.equals(rule.getId())) {
lineCount++;
continue;
}
if (!inRuleGroup) {
subRuleCount = 1;
}
if (!expectedSubId.equals("0") && !expectedSubId.equals(String.valueOf(subRuleCount))) {
lineCount++;
continue;
}
linesToModify.add(lineCount);
break;
}
lineCount++;
}
for (Integer s : linesToModify) {
String newLine = xmlLines.get(s).replaceFirst("type=[\"']incorrect[\"']", newAttribute);
xmlLines.set(s, newLine);
addedCorrectionsCount++;
modifyCount++;
}
if (modifyCount == 0) {
System.err.println("No line modified: " + rule + "[" + expectedSubId + "]");
}
| 732
| 537
| 1,269
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/archive/GermanTaggerEnhancer.java
|
GermanTaggerEnhancer
|
run
|
class GermanTaggerEnhancer {
private static final String[] ADJ_READINGS = {
// singular:
"ADJ:NOM:SIN:MAS:GRU", "ADJ:NOM:SIN:NEU:GRU", "ADJ:NOM:SIN:FEM:GRU", // das Berliner Auto
"ADJ:GEN:SIN:MAS:GRU", "ADJ:GEN:SIN:NEU:GRU", "ADJ:GEN:SIN:FEM:GRU", // des Berliner Autos
"ADJ:DAT:SIN:MAS:GRU", "ADJ:DAT:SIN:NEU:GRU", "ADJ:DAT:SIN:FEM:GRU", // dem Berliner Auto
"ADJ:AKK:SIN:MAS:GRU", "ADJ:AKK:SIN:NEU:GRU", "ADJ:AKK:SIN:FEM:GRU", // den Berliner Bewohner
// plural:
"ADJ:NOM:PLU:MAS:GRU", "ADJ:NOM:PLU:NEU:GRU", "ADJ:NOM:PLU:FEM:GRU", // die Berliner Autos
"ADJ:GEN:PLU:MAS:GRU", "ADJ:GEN:PLU:NEU:GRU", "ADJ:GEN:PLU:FEM:GRU", // der Berliner Autos
"ADJ:DAT:PLU:MAS:GRU", "ADJ:DAT:PLU:NEU:GRU", "ADJ:DAT:PLU:FEM:GRU", // den Berliner Autos
"ADJ:AKK:PLU:MAS:GRU", "ADJ:AKK:PLU:NEU:GRU", "ADJ:AKK:PLU:FEM:GRU", // den Berliner Bewohnern
};
private void run() throws IOException {<FILL_FUNCTION_BODY>}
private boolean hasAnyPosTagStartingWith(Tagger tagger, String word, String initialPosTag) throws IOException {
List<AnalyzedTokenReadings> readings = tagger.tag(Collections.singletonList(word));
return readings.stream().anyMatch(atr -> atr.hasPosTagStartingWith(initialPosTag));
}
public static void main(String[] args) throws IOException {
GermanTaggerEnhancer enhancer = new GermanTaggerEnhancer();
enhancer.run();
}
}
|
final Dictionary dictionary = Dictionary.read(
JLanguageTool.getDataBroker().getFromResourceDirAsUrl("/de/german.dict"));
final DictionaryLookup dl = new DictionaryLookup(dictionary);
Tagger tagger = new GermanyGerman().getTagger();
String prev = null;
for (WordData wd : dl) {
String word = wd.getWord().toString();
if (word.endsWith("er")
&& StringTools.startsWithUppercase(word)
&& !hasAnyPosTagStartingWith(tagger, word, "ADJ:NOM")
&& hasAnyPosTagStartingWith(tagger, word.substring(0, word.length()-2), "EIG")
&& !word.equals(prev)) {
for (String newTags : ADJ_READINGS) {
System.out.println(word + "\t" + word + "\t" + newTags + ":DEF\n"+
word + "\t" + word + "\t" + newTags + ":IND\n"+
word + "\t" + word + "\t" + newTags + ":SOL");
}
prev = word;
}
}
| 645
| 302
| 947
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/archive/MissingGenitiveFinder.java
|
MissingGenitiveFinder
|
hasEsGenitive
|
class MissingGenitiveFinder {
private static final String DICT_FILENAME = "/de/german.dict";
private static final int THRESHOLD = 50;
private final LuceneLanguageModel lm;
private MissingGenitiveFinder() {
lm = new LuceneLanguageModel(new File("/home/dnaber/data/google-ngram-index/de"));
}
private void run() throws IOException {
GermanTagger tagger = new GermanTagger();
final FSA fsa = FSA.read(JLanguageTool.getDataBroker().getFromResourceDirAsStream(DICT_FILENAME));
int i = 0;
for (ByteBuffer buffer : fsa) {
final byte [] sequence = new byte [buffer.remaining()];
buffer.get(sequence);
final String output = new String(sequence, StandardCharsets.UTF_8);
boolean isNoun = output.contains("SUB:") || (output.contains("EIG:") && output.contains("COU")); // COU = Country
if (isNoun && output.contains(":GEN:")) {
String[] parts = output.split("_");
String word = parts[0];
String esWord = parts[0].replaceFirst("s$", "es");
if (isRelevantWord(word)) {
boolean hasEsGenitive = hasEsGenitive(tagger, word);
boolean ignore1 = word.endsWith("els") && !word.endsWith("iels");
long occurrences = lm.getCount(esWord);
if (!hasEsGenitive && !ignore1 && occurrences >= THRESHOLD) {
//System.out.println(i + ". " + word + " " + occurrence);
System.out.println(esWord + "\t" + word.replaceFirst("s$", "") + "\t" + parts[2]);
//System.out.println(" " + occurrences + " " + esWord);
i++;
}
}
}
}
}
private boolean isRelevantWord(String word) {
return word.endsWith("s")
&& !word.endsWith("es")
&& !word.endsWith("ens")
&& !word.endsWith("ems")
&& !word.endsWith("els")
&& !word.endsWith("ers")
&& !word.endsWith("lings")
&& !word.endsWith("leins")
&& !word.endsWith("chens")
&& !word.endsWith("erns")
&& !word.endsWith("elns")
&& !word.endsWith("os")
&& !word.endsWith("us")
&& !word.endsWith("is")
&& !word.endsWith("as")
&& !word.endsWith("ols");
}
private boolean hasEsGenitive(GermanTagger tagger, String word) throws IOException {<FILL_FUNCTION_BODY>}
public static void main(String[] args) throws IOException {
MissingGenitiveFinder prg = new MissingGenitiveFinder();
prg.run();
}
}
|
String esForm = word.replaceFirst("s$", "es");
List<AnalyzedTokenReadings> readings = tagger.tag(Collections.singletonList(esForm));
for (AnalyzedTokenReadings reading : readings) {
if (reading.isTagged()) {
return true;
}
}
return false;
| 789
| 89
| 878
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/archive/POSTagLanguageModel.java
|
POSTagLanguageModel
|
runOnStdIn
|
class POSTagLanguageModel {
public static void main(final String[] args) throws IOException {
if (args.length == 1) {
final Language language = getLanguageOrExit(args[0]);
final JLanguageTool lt = new JLanguageTool(language, null);
runOnStdIn(lt);
} else {
exitWithUsageMessage();
}
}
private static Language getLanguageOrExit(final String lang) {
Language language = null;
boolean foundLanguage = false;
final List<String> supportedLanguages = new ArrayList<>();
for (final Language tmpLang : Languages.get()) {
supportedLanguages.add(tmpLang.getShortCode());
if (lang.equals(tmpLang.getShortCode())) {
language = tmpLang;
foundLanguage = true;
break;
}
}
if (!foundLanguage) {
System.out.println("Unknown language '" + lang
+ "'. Supported languages are: " + supportedLanguages);
exitWithUsageMessage();
}
return language;
}
private static void exitWithUsageMessage() {
System.out.println("Usage: java org.languagetool.dev.archive.POSTagLanguageModel <language>");
}
private static void runOnStdIn(final JLanguageTool lt) throws IOException {<FILL_FUNCTION_BODY>}
private static void tagText(final String contents, final JLanguageTool lt)
throws IOException {
AnalyzedSentence analyzedText;
final List<String> sentences = lt.sentenceTokenize(contents);
for (final String sentence : sentences) {
analyzedText = lt.getAnalyzedSentence(sentence);
System.out.println(getSentence(analyzedText));
}
}
private static String getSentence(final AnalyzedSentence sent) {
final StringBuilder sb = new StringBuilder();
sb.append("<S>");
for (final AnalyzedTokenReadings atr : sent.getTokensWithoutWhitespace()) {
sb.append(getPOS(atr));
sb.append(' ');
}
sb.append("</S>");
return sb.toString();
}
private static String getPOS(final AnalyzedTokenReadings atr) {
final StringBuilder sb = new StringBuilder();
final int readNum = atr.getReadingsLength();
for (int i = 0; i < readNum; i++) {
if (!atr.isWhitespace()) {
sb.append(atr.getAnalyzedToken(i).getPOSTag());
if (i != readNum - 1) {
sb.append('+');
}
}
}
return sb.toString();
}
}
|
final int MAX_FILE_SIZE = 64_000;
InputStreamReader isr = null;
BufferedReader br = null;
StringBuilder sb = new StringBuilder();
try {
isr = new InputStreamReader(new BufferedInputStream(System.in));
br = new BufferedReader(isr);
String line;
while ((line = br.readLine()) != null) {
sb.append(line);
sb.append('\n');
if (lt.getLanguage().getSentenceTokenizer().singleLineBreaksMarksPara()) {
tagText(sb.toString(), lt);
sb = new StringBuilder();
} else {
if ("".equals(line) || sb.length() >= MAX_FILE_SIZE) {
tagText(sb.toString(), lt);
sb = new StringBuilder();
}
}
}
} finally {
if (sb.length() > 0) {
tagText(sb.toString(), lt);
}
}
br.close();
isr.close();
| 698
| 270
| 968
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/archive/RuleSimplifier.java
|
RuleSimplifier
|
getRegex
|
class RuleSimplifier {
private int touchedRulesCount;
private void run(Language lang) throws IOException {
File basePath = new File("/lt/git/languagetool/languagetool-language-modules");
if (!basePath.exists()) {
throw new RuntimeException("basePath does not exist: " + basePath);
}
String langCode = lang.getShortCode();
File xml = new File(basePath, "/" + langCode + "/src/main/resources/org/languagetool/rules/" + langCode + "/grammar.xml");
List<String> xmlLines = IOUtils.readLines(new FileReader(xml));
JLanguageTool tool = new JLanguageTool(lang);
int totalRules = 0;
for (Rule rule : tool.getAllActiveRules()) {
if (!(rule instanceof PatternRule)) {
continue;
}
PatternRule patternRule = (PatternRule) rule;
String id = patternRule.getFullId();
if (isSimple((PatternRule)rule)) {
System.err.println("Simplifying: " + id);
simplify(patternRule, xmlLines);
} else {
System.err.println("Can't simplify: " + id);
}
totalRules++;
}
System.err.println("touchedRulesCount: " + touchedRulesCount + " out of " + totalRules);
for (String xmlLine : xmlLines) {
System.out.println(xmlLine);
}
}
private boolean isSimple(PatternRule rule) {
return rule.getPatternTokens().stream().allMatch(this::isSimple)
&& rule.getStartPositionCorrection() == 0
&& rule.getEndPositionCorrection() == 0;
}
private boolean isSimple(PatternToken t) {
return !(t.getNegation() || t.getPOSNegation() || t.hasAndGroup() || t.hasExceptionList() ||
t.hasNextException() || t.hasOrGroup() || t.isInflected() || t.isPOStagRegularExpression() ||
t.getPOStag() != null || t.isReferenceElement() || t.isSentenceStart() ||
t.getSkipNext() != 0);
}
private String getRegex(PatternRule rule) {<FILL_FUNCTION_BODY>}
private boolean containsBackRef(String str) {
return str.matches(".*\\\\\\d+.*");
}
private void appendTokenString(StringBuilder sb, String str, boolean setAllParenthesis) {
if (str.contains("|") || setAllParenthesis) {
sb.append('(').append(str).append(')');
} else {
sb.append(str);
}
}
// Note: this is a bad hack, we just iterate through the file's lines
private void simplify(PatternRule rule, List<String> xmlLines) {
List<Integer> linesToRemove = new ArrayList<>();
String currentRuleId = null;
Pattern pattern = Pattern.compile(".*id=[\"'](.*?)[\"'].*");
String expectedSubId = rule.getSubId();
int lineCount = 0;
int subRuleCount = 0;
int removedCount = 0;
boolean inRuleGroup = false;
String newRegex = null;
boolean inAntiPattern = false;
for (lineCount = 0; lineCount < xmlLines.size(); lineCount++) {
//for (String xmlLine : xmlLines) {
String xmlLine = xmlLines.get(lineCount);
if (xmlLine.contains("<rulegroup")) {
subRuleCount = 0;
inRuleGroup = true;
} else if (xmlLine.contains("</rulegroup>")) {
subRuleCount = 0;
inRuleGroup = false;
} else if ((xmlLine.contains("<rule ")||xmlLine.contains("<rule>")) && inRuleGroup) {
subRuleCount++;
}
Matcher m = pattern.matcher(xmlLine);
if (m.matches()) {
currentRuleId = m.group(1);
}
if (currentRuleId != null && !currentRuleId.equals(rule.getId())) {
continue;
}
if (!inRuleGroup) {
subRuleCount = 1;
}
if (!expectedSubId.equals("0") && !expectedSubId.equals(String.valueOf(subRuleCount))) {
continue;
}
if (xmlLine.matches(".*<antipattern.*")) {
inAntiPattern = true;
}
if (inAntiPattern) {
continue;
}
if (xmlLine.matches(".*</antipattern.*")) {
inAntiPattern = false;
continue;
}
if (xmlLine.matches(".*<(token|pattern).*") || xmlLine.matches("\\s*</?marker>.*")) {
linesToRemove.add(lineCount);
}
if (xmlLine.matches(".*</pattern.*")) {
linesToRemove.add(lineCount);
int lastTokenIndent = xmlLine.indexOf('<');
newRegex = Strings.repeat(" ", lastTokenIndent) + getRegex(rule);
}
}
Collections.reverse(linesToRemove); // start from end, as we need to remove items
for (Integer s : linesToRemove) {
xmlLines.remove(s.intValue());
removedCount++;
}
if (removedCount == 0) {
System.err.println("No line removed: " + rule + "[" + expectedSubId + "]");
} else {
xmlLines.add(linesToRemove.get(linesToRemove.size()-1), newRegex);
touchedRulesCount++;
}
}
public static void main(String[] args) throws IOException {
RuleSimplifier prg = new RuleSimplifier();
prg.run(Languages.getLanguageForShortCode("de"));
}
}
|
StringBuilder sb = new StringBuilder();
List<PatternToken> tokens = rule.getPatternTokens();
boolean hasCSParts = tokens.stream().anyMatch(PatternToken::isCaseSensitive);
boolean allCSParts = tokens.stream().allMatch(PatternToken::isCaseSensitive);
for (PatternToken patternToken : rule.getPatternTokens()) {
String str = patternToken.getString();
boolean setAllParenthesis = containsBackRef(rule.getMessage()) || containsBackRef(rule.getSuggestionsOutMsg());
if (hasCSParts && !allCSParts && !patternToken.isCaseSensitive()) {
sb.append("(?i:");
appendTokenString(sb, str, setAllParenthesis);
sb.append(')');
} else {
appendTokenString(sb, str, setAllParenthesis);
}
sb.append(' ');
}
String escapedRegex = XmlEscapers.xmlContentEscaper().escape(sb.toString().trim());
if (allCSParts) {
return "<regexp case_sensitive='yes'>" + escapedRegex + "</regexp>";
}
return "<regexp>" + escapedRegex + "</regexp>";
| 1,553
| 313
| 1,866
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/archive/SimpleRuleCounter.java
|
SimpleRuleCounter
|
isSimple
|
class SimpleRuleCounter {
private void run(List<Language> languages) {
List<Language> sortedLanguages = new ArrayList<>(languages);
sortedLanguages.sort(comparing(Language::getName));
for (Language language : sortedLanguages) {
if (language.isVariant()) {
continue;
}
JLanguageTool lt = new JLanguageTool(language);
List<Rule> allRules = lt.getAllActiveRules();
countForLanguage(allRules, language);
}
}
private void countForLanguage(List<Rule> allRules, Language language) {
int simpleCount = 0;
for (Rule rule : allRules) {
boolean isSimple = true;
if (rule instanceof PatternRule) {
PatternRule patternRule = (PatternRule) rule;
List<PatternToken> tokens = patternRule.getPatternTokens();
for (PatternToken token : tokens) {
if (!isSimple(token)) {
isSimple = false;
break;
}
}
if (isSimple) {
simpleCount++;
//System.out.println("Simple: " + patternRule.getId());
//System.out.println(patternRule.toXML());
//System.out.println("-------------------------");
}
}
}
float percent = (float)simpleCount / allRules.size() * 100;
//System.out.printf(simpleCount + "/" + allRules.size() + " = %.0f%% for " + language + "\n", percent);
System.out.printf("%.0f%% for " + language + "\n", percent);
}
private boolean isSimple(PatternToken t) {<FILL_FUNCTION_BODY>}
public static void main(String[] args) {
SimpleRuleCounter finder = new SimpleRuleCounter();
finder.run(Languages.get());
//finder.run(Collections.singletonList(new GermanyGerman()));
//finder.run(Collections.singletonList(new English()));
}
}
|
return !(t.getNegation() || t.getPOSNegation() || t.hasAndGroup() || t.hasExceptionList() ||
t.hasNextException() || t.hasOrGroup() || t.isInflected() || t.isPOStagRegularExpression() ||
t.getPOStag() != null || t.isReferenceElement() || t.isSentenceStart() ||
t.getSkipNext() != 0);
| 522
| 112
| 634
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/archive/StartTokenCounter.java
|
StartTokenCounter
|
main
|
class StartTokenCounter {
private StartTokenCounter() {
}
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
}
|
long totalCount = 0;
File dir = new File("/data/google-ngram-index/en/2grams");
try (FSDirectory directory = FSDirectory.open(dir.toPath());
IndexReader reader = DirectoryReader.open(directory)) {
IndexSearcher searcher = new IndexSearcher(reader);
Fields fields = MultiFields.getFields(reader);
Terms ngrams = fields.terms("ngram");
TermsEnum iterator = ngrams.iterator();
BytesRef next;
int i = 0;
while ((next = iterator.next()) != null) {
String term = next.utf8ToString();
if (term.startsWith(LanguageModel.GOOGLE_SENTENCE_START)) {
if (term.matches(".*_(ADJ|ADV|NUM|VERB|ADP|NOUN|PRON|CONJ|DET|PRT)$")) {
//System.out.println("ignore: " + term);
continue;
}
TopDocs topDocs = searcher.search(new TermQuery(new Term("ngram", term)), 3);
if (topDocs.totalHits == 0) {
throw new RuntimeException("No hits for " + term + ": " + topDocs.totalHits);
} else if (topDocs.totalHits == 1) {
int docId = topDocs.scoreDocs[0].doc;
Document document = reader.document(docId);
Long count = Long.parseLong(document.get("count"));
//System.out.println(term + " -> " + count);
totalCount += count;
if (++i % 10_000 == 0) {
System.out.println(i + " ... " + totalCount);
}
} else {
throw new RuntimeException("More hits than expected for " + term + ": " + topDocs.totalHits);
}
}
}
}
System.out.println("==> " + totalCount);
| 49
| 515
| 564
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/archive/UselessExampleFinder.java
|
UselessExampleFinder
|
checkCorrections
|
class UselessExampleFinder {
private int uselessExampleCount;
private int removedLinesCount;
private void run(Language lang) throws IOException {
File basePath = new File("/lt/git/languagetool/languagetool-language-modules");
if (!basePath.exists()) {
throw new RuntimeException("basePath does not exist: " + basePath);
}
String langCode = lang.getShortCode();
File xml = new File(basePath, "/" + langCode + "/src/main/resources/org/languagetool/rules/" + langCode + "/grammar.xml");
List<String> xmlLines = IOUtils.readLines(new FileReader(xml));
JLanguageTool tool = new JLanguageTool(lang);
for (Rule rule : tool.getAllActiveRules()) {
if (!(rule instanceof PatternRule)) {
continue;
}
List<CorrectExample> correctExamples = rule.getCorrectExamples();
List<IncorrectExample> incorrectExamples = rule.getIncorrectExamples();
for (IncorrectExample incorrectExample : incorrectExamples) {
checkCorrections(rule, correctExamples, incorrectExample, xmlLines);
}
}
System.err.println("Useless examples: " + uselessExampleCount);
System.err.println("Removed lines: " + removedLinesCount);
for (String xmlLine : xmlLines) {
System.out.println(xmlLine);
}
}
private void checkCorrections(Rule rule, List<CorrectExample> correctExamplesObjs, IncorrectExample incorrectExample, List<String> xmlLines) {<FILL_FUNCTION_BODY>}
// Note: this is a bad hack, we just iterate through the file's lines
private void removeLinesFromXml(Rule rule, String sentenceToRemove, List<String> xmlLines) {
List<Integer> linesToRemove = new ArrayList<>();
String currentRuleId = null;
Pattern pattern = Pattern.compile(".*id=[\"'](.*?)[\"'].*");
String expectedSubId = ((AbstractPatternRule) rule).getSubId();
int lineCount = 0;
int subRuleCount = 0;
int removedCount = 0;
boolean inRuleGroup = false;
for (String xmlLine : xmlLines) {
if (xmlLine.contains("<rulegroup")) {
subRuleCount = 0;
inRuleGroup = true;
} else if (xmlLine.contains("</rulegroup>")) {
subRuleCount = 0;
inRuleGroup = false;
} else if ((xmlLine.contains("<rule ")||xmlLine.contains("<rule>")) && inRuleGroup) {
subRuleCount++;
}
Matcher m = pattern.matcher(xmlLine);
if (m.matches()) {
currentRuleId = m.group(1);
}
if (!xmlLine.contains("correction=") && xmlLine.contains(sentenceToRemove + "</example>")) {
if (currentRuleId != null && !currentRuleId.equals(rule.getId())) {
lineCount++;
continue;
}
if (!inRuleGroup) {
subRuleCount = 1;
}
if (!expectedSubId.equals("0") && !expectedSubId.equals(String.valueOf(subRuleCount))) {
lineCount++;
continue;
}
linesToRemove.add(lineCount);
break;
}
lineCount++;
}
Collections.reverse(linesToRemove); // start from end, as we need to remove items
for (Integer s : linesToRemove) {
xmlLines.remove(s.intValue());
removedLinesCount++;
removedCount++;
}
if (removedCount == 0) {
System.err.println("No line removed: " + rule + "[" + expectedSubId + "]");
}
}
public static void main(String[] args) throws IOException {
UselessExampleFinder prg = new UselessExampleFinder();
prg.run(Languages.getLanguageForShortCode("de"));
}
}
|
List<String> correctExamples = correctExamplesObjs.stream().map(k -> k.getExample()).collect(Collectors.toList());
List<String> corrections = incorrectExample.getCorrections();
for (String correction : corrections) {
String fixedSentence = incorrectExample.getExample().replaceAll("<marker>.*?</marker>", "<marker>" + correction.replace("$", "\\$") + "</marker>");
String fixedSentenceNoMarker = incorrectExample.getExample().replaceAll("<marker>.*?</marker>", correction.replace("$", "\\$"));
if (correctExamples.contains(fixedSentence)) {
System.err.println("Useless: " + fixedSentence + " in " + rule.getId());
removeLinesFromXml(rule, fixedSentence, xmlLines);
uselessExampleCount++;
}
if (correctExamples.contains(fixedSentenceNoMarker)) {
System.err.println("Useless: " + fixedSentenceNoMarker + " in " + rule.getId());
removeLinesFromXml(rule, fixedSentenceNoMarker, xmlLines);
uselessExampleCount++;
}
}
| 1,059
| 305
| 1,364
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/archive/WordTokenizer.java
|
WordTokenizer
|
run
|
class WordTokenizer {
public static void main(final String[] args) throws IOException {
final WordTokenizer prg = new WordTokenizer();
if (args.length != 1) {
System.err.println("Please supply the language code as the only argument.");
System.exit(-1);
}
prg.run(args[0]);
}
private void run(final String lang) throws IOException {<FILL_FUNCTION_BODY>}
}
|
JLanguageTool lt = new JLanguageTool(Languages.getLanguageForShortCode(lang));
BufferedWriter out = null;
try (BufferedReader in = new BufferedReader(new InputStreamReader(System.in))) {
out = new BufferedWriter(new OutputStreamWriter(System.out));
String line;
while ((line = in.readLine()) != null) {
AnalyzedTokenReadings[] atr = lt.getRawAnalyzedSentence(line).
getTokensWithoutWhitespace();
for (AnalyzedTokenReadings a : atr) {
out.write(a.getToken());
out.write('\n');
}
}
} finally {
if (out != null) {
out.flush();
out.close();
}
}
| 120
| 206
| 326
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/bigdata/AggregatedNgramToLucene.java
|
AggregatedNgramToLucene
|
addTotalTokenCountDoc
|
class AggregatedNgramToLucene implements AutoCloseable {
private final Map<Integer, LuceneIndex> indexes = new HashMap<>();
private long totalTokenCount = 0;
private long lineCount = 0;
AggregatedNgramToLucene(File indexTopDir) throws IOException {
indexes.put(1, new LuceneIndex(new File(indexTopDir, "1grams")));
indexes.put(2, new LuceneIndex(new File(indexTopDir, "2grams")));
indexes.put(3, new LuceneIndex(new File(indexTopDir, "3grams")));
}
@Override
public void close() throws IOException {
for (LuceneIndex index : indexes.values()) {
index.close();
}
}
void indexInputFile(File file) throws IOException {
System.out.println("=== Indexing " + file + " ===");
try (Scanner scanner = new Scanner(file)) {
while (scanner.hasNextLine()) {
String line = scanner.nextLine();
indexLine(line);
}
}
}
private void indexLine(String line) throws IOException {
if (lineCount++ % 250_000 == 0) {
System.out.printf(Locale.ENGLISH, "Indexing line %d\n", lineCount);
}
String[] lineParts = line.split("\t");
if (lineParts.length != 2) {
System.err.println("Not 2 parts but " + lineParts.length + ", ignoring: '" + line + "'");
return;
}
String ngram = lineParts[0];
String[] ngramParts = ngram.split(" ");
LuceneIndex index = indexes.get(ngramParts.length);
if (index == null) {
throw new RuntimeException("No ngram data found for: " + Arrays.toString(lineParts));
}
long count = Long.parseLong(lineParts[1]);
if (ngramParts.length == 1) {
totalTokenCount += count;
}
index.indexWriter.addDocument(getDoc(ngram, count));
}
@NotNull
private Document getDoc(String ngram, long count) {
Document doc = new Document();
doc.add(new Field("ngram", ngram, StringField.TYPE_NOT_STORED)); // use StringField.TYPE_STORED for easier debugging with e.g. Luke
doc.add(getCountField(count));
return doc;
}
@NotNull
private LongField getCountField(long count) {
FieldType fieldType = new FieldType();
fieldType.setStored(true);
fieldType.setOmitNorms(true);
fieldType.setNumericType(FieldType.NumericType.LONG);
fieldType.setDocValuesType(DocValuesType.NUMERIC);
return new LongField("count", count, fieldType);
}
private void addTotalTokenCountDoc(long totalTokenCount, IndexWriter writer) throws IOException {<FILL_FUNCTION_BODY>}
public static void main(String[] args) throws IOException {
if (args.length != 1) {
System.out.println("Usage: " + AggregatedNgramToLucene.class + " <inputDir>");
System.out.println(" <inputDir> is a directory with aggregated ngram files from Hadoop, e.g. produced by CommonCrawlNGramJob");
System.exit(1);
}
File inputDir = new File(args[0]);
File outputDir = new File(inputDir, "index");
System.out.println("Indexing to " + outputDir);
try (AggregatedNgramToLucene prg = new AggregatedNgramToLucene(outputDir)) {
for (File file : inputDir.listFiles()) {
if (file.isFile()) {
prg.indexInputFile(file);
}
}
prg.addTotalTokenCountDoc(prg.totalTokenCount, prg.indexes.get(1).indexWriter);
}
}
static class LuceneIndex {
private final Directory directory;
private final IndexWriter indexWriter;
LuceneIndex(File dir) throws IOException {
Analyzer analyzer = new StandardAnalyzer();
IndexWriterConfig config = new IndexWriterConfig(analyzer);
directory = FSDirectory.open(dir.toPath());
indexWriter = new IndexWriter(directory, config);
}
void close() throws IOException {
indexWriter.close();
directory.close();
}
}
}
|
FieldType fieldType = new FieldType();
fieldType.setIndexOptions(IndexOptions.DOCS);
fieldType.setStored(true);
fieldType.setOmitNorms(true);
Field countField = new Field("totalTokenCount", String.valueOf(totalTokenCount), fieldType);
Document doc = new Document();
doc.add(countField);
writer.addDocument(doc);
| 1,184
| 104
| 1,288
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/bigdata/AllConfusionRulesEvaluator.java
|
AllConfusionRulesEvaluator
|
main
|
class AllConfusionRulesEvaluator {
private static final int MAX_SENTENCES = 1000;
private AllConfusionRulesEvaluator() {
}
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
}
|
if (args.length < 3 || args.length > 4) {
System.err.println("Usage: " + AllConfusionRulesEvaluator.class.getSimpleName()
+ " <langCode> <languageModelTopDir> <wikipediaXml|tatoebaFile|dir>...");
System.err.println(" <languageModelTopDir> is a directory with sub-directories '1grams', '2grams', and '3grams' with Lucene indexes");
System.err.println(" <wikipediaXml|tatoebaFile|dir> either a Wikipedia XML dump, or a Tatoeba file or");
System.err.println(" a directory with example sentences (where <word>.txt contains only the sentences for <word>).");
System.err.println(" You can specify both a Wikipedia file and a Tatoeba file.");
System.exit(1);
}
Language lang;
if ("en".equals(args[0])) {
lang = new ConfusionRuleEvaluator.EnglishLight();
} else {
lang = Languages.getLanguageForShortCode(args[0]);
}
LanguageModel languageModel = new LuceneLanguageModel(new File(args[1]));
List<String> inputsFiles = new ArrayList<>();
inputsFiles.add(args[2]);
if (args.length >= 4) {
inputsFiles.add(args[3]);
}
ConfusionRuleEvaluator eval = new ConfusionRuleEvaluator(lang, languageModel, false, true); // TODO: consider bidirectional
eval.setVerboseMode(false);
ConfusionSetLoader confusionSetLoader = new ConfusionSetLoader(lang);
InputStream inputStream = JLanguageTool.getDataBroker().getFromResourceDirAsStream("/" + lang.getShortCode() +"/confusion_sets.txt");
Map<String,List<ConfusionPair>> confusionSetMap = confusionSetLoader.loadConfusionPairs(inputStream);
Set<String> done = new HashSet<>();
int fMeasureCount = 0;
float fMeasureTotal = 0;
for (List<ConfusionPair> entry : confusionSetMap.values()) {
for (ConfusionPair confusionPair : entry) {
List<ConfusionString> set = confusionPair.getTerms();
if (set.size() != 2) {
System.out.println("Skipping confusion set with size != 2: " + confusionPair);
} else {
Iterator<ConfusionString> iterator = set.iterator();
ConfusionString set1 = iterator.next();
ConfusionString set2 = iterator.next();
String word1 = set1.getString();
String word2 = set2.getString();
String key = word1 + " " + word2;
if (!done.contains(key)) {
Map<Long, RuleEvalResult> evalResults = eval.run(inputsFiles, word1, word2, MAX_SENTENCES,
Arrays.asList(confusionPair.getFactor()), Collections.emptyMap(), Collections.emptyMap());
RuleEvalResult evalResult = evalResults.values().iterator().next();
String summary1 = set1.getDescription() != null ? word1 + "|" + set1.getDescription() : word1;
String summary2 = set2.getDescription() != null ? word2 + "|" + set2.getDescription() : word2;
String start;
if (summary1.compareTo(summary2) < 0) {
start = summary1 + "; " + summary2 + "; " + confusionPair.getFactor();
} else {
start = summary2 + "; " + summary1 + "; " + confusionPair.getFactor();
}
String spaces = StringUtils.repeat(" ", 82-start.length());
System.out.println(start + spaces + "# " + evalResult.getSummary());
double fMeasure = FMeasure.getWeightedFMeasure(evalResult.getPrecision(), evalResult.getRecall());
//System.out.println("f-measure: " + fMeasure);
fMeasureCount++;
fMeasureTotal += fMeasure;
}
done.add(key);
}
}
}
System.out.println("Average f-measure: " + (fMeasureTotal/fMeasureCount));
| 78
| 1,065
| 1,143
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/bigdata/AutomaticConfusionRuleEvaluatorFilter.java
|
AutomaticConfusionRuleEvaluatorFilter
|
main
|
class AutomaticConfusionRuleEvaluatorFilter {
private final static float MIN_PRECISION = 0.99f;
private final static int MIN_OCCURRENCES = 25;
private AutomaticConfusionRuleEvaluatorFilter() {
}
private static String reformat(String s) {
int spaceStart = s.indexOf("0;");
if (spaceStart == -1) {
spaceStart = s.indexOf("1;");
}
int spaceEnd = s.indexOf('#');
if (spaceStart > 0 && spaceEnd > 0) {
String spaces = StringUtils.repeat(" ", 52-spaceStart);
return s.substring(0, spaceStart+2) + spaces + s.substring(spaceEnd);
}
return s;
}
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
}
|
if (args.length != 1) {
System.out.println("Usage: " + AutomaticConfusionRuleEvaluatorFilter.class.getSimpleName() + " <file>");
System.out.println(" <file> is the output of " + AutomaticConfusionRuleEvaluator.class.getName());
System.exit(0);
}
List<String> lines = Files.readAllLines(Paths.get(args[0]), StandardCharsets.UTF_8);
String prevKey = null;
int skippedCount = 0;
int lowPrecisionCount = 0;
int lowOccurrenceCount = 0;
int usedCount = 0;
boolean skipping = false;
for (String line : lines) {
if (!line.startsWith("=>")) {
continue;
}
String cleanLine = line.replaceFirst("=> ", "").replaceFirst("; \\d.*", "");
String[] parts;
boolean bothDirections;
if (cleanLine.contains("->")) {
parts = cleanLine.split("\\s*->\\s*");
bothDirections = false;
} else {
parts = cleanLine.split(";\\s*");
bothDirections = true;
}
String key = parts[0] + ";" + parts[1];
Pattern data = Pattern.compile("^(.+?)(?:;| ->) (.+?);.*p=(\\d\\.\\d+), r=(\\d\\.\\d+), f0.5=\\d\\.\\d+, (\\d+)\\+(\\d+),.*");
Matcher m = data.matcher(line.replaceFirst("=> ", ""));
m.find();
String word1 = m.group(1);
String word2 = m.group(2);
String delim = bothDirections ? "; " : " -> ";
String wordGroup = word1 + delim + word2;
if (word1.compareTo(word2) > 0 && bothDirections) {
wordGroup = word2 + delim + word1;
}
float precision = Float.parseFloat(m.group(3));
int occ1 = Integer.parseInt(m.group(5));
int occ2 = Integer.parseInt(m.group(6));
if (key.equals(prevKey)) {
if (skipping) {
//System.out.println("SKIP: " + reformat(line));
}
} else {
if (precision < MIN_PRECISION) {
lowPrecisionCount++;
skippedCount++;
skipping = true;
continue;
}
if (occ1 < MIN_OCCURRENCES || occ2 < MIN_OCCURRENCES) {
lowOccurrenceCount++;
skippedCount++;
skipping = true;
continue;
}
System.out.println(reformat(line.replaceFirst("=> .+?(;| ->) .+?; ", wordGroup + "; ")));
skipping = false;
usedCount++;
}
prevKey = key;
}
System.err.println("Skipped: " + skippedCount);
System.err.println("lowPrecisionCount: " + lowPrecisionCount);
System.err.println("lowOccurrences: " + lowOccurrenceCount);
System.err.println("Used: " + usedCount);
| 236
| 867
| 1,103
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/bigdata/CommonCrawlToNgram3.java
|
CommonCrawlToNgram3
|
indexInputFile
|
class CommonCrawlToNgram3 implements AutoCloseable {
private static final int MAX_TOKEN_LENGTH = 20;
private static final int MAX_SENTENCE_LENGTH = 50_000;
private static final int CACHE_LIMIT = 1_000_000; // max. number of trigrams in HashMap before we flush to Lucene
private final File input;
private final SentenceTokenizer sentenceTokenizer;
private final Tokenizer wordTokenizer;
private final Map<String, Long> unigramToCount = new HashMap<>();
private final Map<String, Long> bigramToCount = new HashMap<>();
private final Map<String, Long> trigramToCount = new HashMap<>();
private final Map<Integer, FileWriter> ngramSizeToWriter = new HashMap<>();
private long charCount = 0;
private long lineCount = 0;
CommonCrawlToNgram3(Language language, File input, File outputDir) throws IOException {
this.input = input;
this.sentenceTokenizer = language.getSentenceTokenizer();
this.wordTokenizer = new GoogleStyleWordTokenizer();
ngramSizeToWriter.put(1, new FileWriter(new File(outputDir, "unigrams.csv")));
ngramSizeToWriter.put(2, new FileWriter(new File(outputDir, "bigrams.csv")));
ngramSizeToWriter.put(3, new FileWriter(new File(outputDir, "trigrams.csv")));
}
@Override
public void close() throws Exception {
for (Map.Entry<Integer, FileWriter> entry : ngramSizeToWriter.entrySet()) {
entry.getValue().close();
}
}
private void indexInputFile() throws IOException, CompressorException {<FILL_FUNCTION_BODY>}
private void indexLine(String[] lines) throws IOException {
for (String line : lines) {
if (line.length() > MAX_SENTENCE_LENGTH) {
System.out.println("Ignoring long line: " + line.length() + " bytes");
continue;
}
if (lineCount++ % 50_000 == 0) {
float mb = (float) charCount / 1000 / 1000;
System.out.printf(Locale.ENGLISH, "Indexing line %d (%.2fMB)\n", lineCount, mb);
}
charCount += line.length();
List<String> sentences = sentenceTokenizer.tokenize(line);
for (String sentence : sentences) {
indexSentence(sentence);
}
}
}
private void indexSentence(String sentence) throws IOException {
List<String> tokens = wordTokenizer.tokenize(sentence);
tokens.add(0, LanguageModel.GOOGLE_SENTENCE_START);
tokens.add(LanguageModel.GOOGLE_SENTENCE_END);
String prevPrev = null;
String prev = null;
for (String token : tokens) {
if (token.trim().isEmpty()) {
continue;
}
if (token.length() <= MAX_TOKEN_LENGTH) {
unigramToCount.compute(token, (k, v) -> v == null ? 1 : v + 1);
}
if (prev != null) {
if (token.length() <= MAX_TOKEN_LENGTH && prev.length() <= MAX_TOKEN_LENGTH) {
String ngram = prev + " " + token;
bigramToCount.compute(ngram, (k, v) -> v == null ? 1 : v + 1);
}
}
if (prevPrev != null && prev != null) {
if (token.length() <= MAX_TOKEN_LENGTH && prev.length() <= MAX_TOKEN_LENGTH && prevPrev.length() <= MAX_TOKEN_LENGTH) {
String ngram = prevPrev + " " + prev + " " + token;
trigramToCount.compute(ngram, (k, v) -> v == null ? 1 : v + 1);
}
if (unigramToCount.size() > CACHE_LIMIT) {
writeToDisk(1, unigramToCount);
}
if (bigramToCount.size() > CACHE_LIMIT) {
writeToDisk(2, bigramToCount);
}
if (trigramToCount.size() > CACHE_LIMIT) {
writeToDisk(3, trigramToCount);
}
}
prevPrev = prev;
prev = token;
}
}
private void writeToDisk(int ngramSize, Map<String, Long> ngramToCount) throws IOException {
System.out.println("Writing " + ngramToCount.size() + " cached ngrams to disk (ngramSize=" + ngramSize + ")...");
FileWriter writer = ngramSizeToWriter.get(ngramSize);
for (Map.Entry<String, Long> entry : ngramToCount.entrySet()) {
writer.write(entry.getKey() + "\t" + entry.getValue() + "\n");
}
writer.flush();
ngramToCount.clear();
}
public static void main(String[] args) throws Exception {
if (args.length != 3) {
System.out.println("Usage: " + CommonCrawlToNgram3.class + " <langCode> <input.xz/bz2> <outputDir>");
System.exit(1);
}
Language language = Languages.getLanguageForShortCode(args[0]);
File input = new File(args[1]);
File outputDir = new File(args[2]);
try (CommonCrawlToNgram3 prg = new CommonCrawlToNgram3(language, input, outputDir)) {
prg.indexInputFile();
}
}
}
|
FileInputStream fin = new FileInputStream(input);
BufferedInputStream in = new BufferedInputStream(fin);
try (CompressorInputStream input = new CompressorStreamFactory().createCompressorInputStream(in)) {
final byte[] buffer = new byte[8192];
int n;
while ((n = input.read(buffer)) != -1) {
String buf = new String(buffer, 0, n); // TODO: not always correct, we need to wait for line end first?
String[] lines = buf.split("\n");
indexLine(lines);
}
}
writeToDisk(1, unigramToCount);
writeToDisk(2, bigramToCount);
writeToDisk(3, trigramToCount);
| 1,518
| 190
| 1,708
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/bigdata/ConfusionFileIndenter.java
|
ConfusionFileIndenter
|
indent
|
class ConfusionFileIndenter {
public static void main(String[] args) throws IOException {
if (args.length != 1) {
System.out.println("Usage: " + ConfusionFileIndenter.class.getSimpleName() + " <file>");
System.exit(1);
}
List<String> lines = Files.readAllLines(Paths.get(args[0]));
System.out.println(indent(lines));
}
static String indent(List<String> lines) {<FILL_FUNCTION_BODY>}
}
|
StringBuilder indentedLines = new StringBuilder();
Set<String> alreadyDone = new HashSet<>();
for (String line : lines) {
if (!line.startsWith("#") && !line.isEmpty()) {
String[] parts = line.replaceFirst("\\s*#.*", "").split(";\\s*");
String key = parts[0] + ";" + parts[1];
if (alreadyDone.contains(key)) {
//System.err.println("Skipping, already appeared: " + key);
continue;
}
alreadyDone.add(key);
}
int commentPos = line.lastIndexOf('#');
if (commentPos <= 0) {
indentedLines.append(line).append('\n');
} else {
int endData = commentPos;
while (Character.isWhitespace(line.charAt(endData - 1))) {
endData--;
}
String spaces = StringUtils.repeat(" ", Math.max(1, 82-endData));
indentedLines.append(line, 0, endData).append(spaces).append(line.substring(commentPos)).append('\n');
}
}
return indentedLines.toString();
| 146
| 313
| 459
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/bigdata/ConfusionSetFileFormatter.java
|
ConfusionSetFileFormatter
|
reformat
|
class ConfusionSetFileFormatter {
public static void main(String[] args) throws IOException {
if (args.length != 1) {
System.out.println("Usage: " + ConfusionSetFileFormatter.class.getSimpleName() + " <confusion_set.txt>");
System.exit(1);
}
List<String> lines = Files.readAllLines(Paths.get(args[0]));
for (String line : lines) {
System.out.println(reformat(line));
}
}
private static String reformat(String s) {<FILL_FUNCTION_BODY>}
}
|
Pattern pattern = Pattern.compile(";\\s*\\d+");
Matcher matcher = pattern.matcher(s);
if (matcher.find()) {
int spaceStart = matcher.end();
int spaceEnd = s.indexOf('#', 2);
if (spaceStart > 0 && spaceEnd > 0) {
String spaces = StringUtils.repeat(" ", 52-spaceStart);
return s.substring(0, spaceStart+1) + spaces + s.substring(spaceEnd);
}
}
return s;
| 166
| 141
| 307
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/bigdata/ConfusionSetOccurrenceLookup.java
|
ConfusionSetOccurrenceLookup
|
main
|
class ConfusionSetOccurrenceLookup {
private ConfusionSetOccurrenceLookup() {
}
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
}
|
if (args.length != 2) {
System.out.println("Usage: " + ConfusionSetOccurrenceLookup.class.getName() + " <confusion-file> <ngram-data-dir>");
System.exit(1);
}
try (Scanner sc = new Scanner(new File(args[0]));
LuceneLanguageModel lm = new LuceneLanguageModel(new File(args[1]))
) {
while (sc.hasNextLine()) {
String line = sc.nextLine();
String[] words = line.split(";\\s*");
long total = 0;
List<Long> counts = new ArrayList<>();
StringBuilder sb = new StringBuilder();
for (String word : words) {
long count = lm.getCount(word);
total += count;
sb.append(word).append(':').append(count).append(' ');
counts.add(count);
}
float factor = (float)Collections.max(counts) / Collections.min(counts);
System.out.printf(Locale.ENGLISH, total + " " + line + " " + sb.toString().trim() + " factor:%.1f\n", factor);
}
}
| 59
| 312
| 371
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/bigdata/ContextBuilder.java
|
ContextBuilder
|
getLeftContext
|
class ContextBuilder {
private final String startMarker;
private final String endMarker;
public ContextBuilder() {
this.startMarker = "_START_";
this.endMarker = "_END_";
}
public ContextBuilder(String startMarker, String endMarker) {
this.startMarker = startMarker;
this.endMarker = endMarker;
}
public List<String> getContext(AnalyzedTokenReadings[] tokens, int pos, int contextSize) {
List<String> l = new ArrayList<>();
int i = 0;
for (AnalyzedTokenReadings token : tokens) {
if (i == pos) {
l.addAll(getLeftContext(tokens, pos, contextSize));
l.add(token.getToken());
l.addAll(getRightContext(tokens, pos, contextSize));
break;
}
i++;
}
return l;
}
private List<String> getLeftContext(AnalyzedTokenReadings[] tokens, int pos, int contextSize) {<FILL_FUNCTION_BODY>}
private List<String> getRightContext(AnalyzedTokenReadings[] tokens, int pos, int contextSize) {
List<String> l = new ArrayList<>();
for (int i = pos + 1; i <= tokens.length && l.size() < contextSize; i++) {
if (i == tokens.length) {
l.add(endMarker);
} else {
l.add(tokens[i].getToken());
}
}
return l;
}
}
|
List<String> l = new ArrayList<>();
for (int i = pos - 1; i >= 0 && l.size() < contextSize; i--) {
if (i == 0) {
l.add(0, startMarker);
} else {
l.add(0, tokens[i].getToken());
}
}
return l;
| 401
| 94
| 495
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/bigdata/GermanAuxVerbGuesser.java
|
GermanAuxVerbGuesser
|
main
|
class GermanAuxVerbGuesser {
private GermanAuxVerbGuesser() {
}
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
private static long countHaben(LuceneLanguageModel lm, String pa2) {
return
count(lm, pa2, "habe")
+ count(lm, pa2, "hast")
+ count(lm, pa2, "hat")
+ count(lm, pa2, "habt")
+ count(lm, pa2, "haben")
+ count(lm, pa2, "hatte")
+ count(lm, pa2, "hattest")
+ count(lm, pa2, "hatte")
+ count(lm, pa2, "hatten")
+ count(lm, pa2, "hattet")
+ count2(lm, pa2, "werde", "haben")
+ count2(lm, pa2, "wirst", "haben")
+ count2(lm, pa2, "wird", "haben")
+ count2(lm, pa2, "werden", "haben")
+ count2(lm, pa2, "werdet", "haben");
}
private static long countSein(LuceneLanguageModel lm, String pa2) {
return
count(lm, pa2, "bin")
+ count(lm, pa2, "bist")
+ count(lm, pa2, "ist")
+ count(lm, pa2, "sind")
+ count(lm, pa2, "seid")
+ count(lm, pa2, "war")
+ count(lm, pa2, "warst")
+ count(lm, pa2, "war")
+ count(lm, pa2, "waren")
+ count(lm, pa2, "wart")
+ count2(lm, pa2, "werde", "sein")
+ count2(lm, pa2, "wirst", "sein")
+ count2(lm, pa2, "wird", "sein")
+ count2(lm, pa2, "werden", "sein")
+ count2(lm, pa2, "werdet", "sein");
}
private static long count(LuceneLanguageModel lm, String pa2, String verb) {
long count = lm.getCount(asList(verb, pa2));
if (count > 0) {
System.out.println(verb + " " + pa2 + ": " + count);
//long count2 = lm.getCount(asList(verb, pa2, "worden"));
//System.out.println(" BUT: " + verb + " " + pa2 + " worden: " + count2);
}
return count;
}
private static long count2(LuceneLanguageModel lm, String pa2, String werde, String sein) {
long count = lm.getCount(asList(werde, pa2, sein));
if (count > 0) {
System.out.println(werde + " " + pa2 + " " + sein + ": " + count);
}
return count;
}
}
|
if (args.length != 2) {
System.out.println("Usage: " + GermanAuxVerbGuesser.class.getName() + " <ngramDataIndex> <lemmaFile>");
System.out.println(" <lemmaFile> is a text file with 'participle2 \\t lemma' per line, e.g. 'getrunken \t trinken'");
System.exit(1);
}
String indexTopDir = args[0];
List<String> lines = Files.readAllLines(Paths.get(args[1]));
int match = 0;
int noMatch = 0;
int unambiguous = 0;
try (LuceneLanguageModel lm = new LuceneLanguageModel(new File(indexTopDir))) {
for (String line : lines) {
if (line.startsWith("#")) {
continue;
}
String pa2 = line.split("\t")[0];
String lemma = line.split("\t")[1];
long haben = countHaben(lm, pa2);
long sein = countSein(lm, pa2);
float ratio = (float)haben/sein;
System.out.printf(Locale.ENGLISH, "%.2f " + lemma + ": haben: " + haben + ", sein: " + sein + "\n", ratio);
if (haben == 0 && sein == 0) {
noMatch++;
} else {
if (haben == 0 && sein > 0 || haben > 0 && sein == 0) {
unambiguous++;
}
match++;
}
}
}
System.out.println("match: " + match);
System.out.println("noMatch: " + noMatch);
System.out.println("----");
System.out.println("unambiguous: " + unambiguous);
| 822
| 477
| 1,299
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/bigdata/GermanAuxVerbGuesser2.java
|
GermanAuxVerbGuesser2
|
main
|
class GermanAuxVerbGuesser2 {
private GermanAuxVerbGuesser2() {
}
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
private static long count(LuceneLanguageModel lm, String pa2, String lemma, String reflexivePronoun) {
return
lm.getCount(asList(reflexivePronoun, pa2))
+ lm.getCount(asList(reflexivePronoun, lemma));
}
}
|
if (args.length != 2) {
System.out.println("Usage: " + GermanAuxVerbGuesser2.class.getName() + " <ngramDataIndex> <lemmaFile>");
System.out.println(" <lemmaFile> is a text file with 'participle2 \\t lemma' per line, e.g. 'getrunken \t trinken'");
System.exit(1);
}
String indexTopDir = args[0];
List<String> lines = Files.readAllLines(Paths.get(args[1]));
System.out.println("# factor lemma Dativ/mir Akkusativ/mich");
try (LuceneLanguageModel lm = new LuceneLanguageModel(new File(indexTopDir))) {
for (String line : lines) {
String pa2 = line.split("\t")[0];
String lemma = line.split("\t")[1];
long mir = count(lm, pa2, lemma, "mir");
long mich = count(lm, pa2, lemma, "mich");
long dir = count(lm, pa2, lemma, "dir");
long dich = count(lm, pa2, lemma, "dich");
float factor = ((float)mir + dir) / ((float)mich + dich);
System.out.println(factor + " " + lemma + " " + mir + " " + mich);
}
}
| 138
| 354
| 492
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/bigdata/GermanReflexiveVerbGuesser.java
|
GermanReflexiveVerbGuesser
|
counterExamples
|
class GermanReflexiveVerbGuesser {
private final Synthesizer synthesizer;
private GermanReflexiveVerbGuesser() {
synthesizer = new GermanyGerman().getSynthesizer();
}
private void run(File indexTopDir, File lemmaListFile) throws IOException {
List<String> lemmas = Files.readAllLines(lemmaListFile.toPath());
System.out.println("Durchschnitt Prozent | Anzahl Lemma | mich/uns/euch ... | ... mich/uns/euch | Lemma");
try (LuceneLanguageModel lm = new LuceneLanguageModel(indexTopDir)) {
for (String lemma : lemmas) {
//if (!lemma.equals("reklamieren")) { continue; }
//if (!lemma.equals("hertreiben")) { continue; }
String[] firstPsSinArray = synthesizer.synthesize(new AnalyzedToken(lemma, "VER:INF:NON", lemma), "VER:1:SIN:PRÄ.*", true);
String[] thirdPsSinArray = synthesizer.synthesize(new AnalyzedToken(lemma, "VER:INF:NON", lemma), "VER:3:SIN:PRÄ.*", true);
String firstPsSin = firstPsSinArray.length > 0 ? firstPsSinArray[0] : null;
String thirdPsSin = thirdPsSinArray.length > 0 ? thirdPsSinArray[0] : null;
long reflexiveCount1 = count1(lm, lemma, firstPsSin, thirdPsSin)
- counterExamples("für", lm, lemma, firstPsSin, thirdPsSin)
- counterExamples("vor", lm, lemma, firstPsSin, thirdPsSin);
long reflexiveCount2 = count2(lm, lemma, firstPsSin, thirdPsSin);
long lemmaCount = lm.getCount(lemma);
float factor1 = ((float)reflexiveCount1 / lemmaCount) * 100.0f;
float factor2 = ((float)reflexiveCount2 / lemmaCount) * 100.0f;
float avgFactor = (factor1 + factor2) / 2;
//System.out.printf("%.2f%% %.2f%% " + reflexiveCount1 + " " + reflexiveCount2 + " " + lemmaCount + " " + lemma + "\n", factor1, factor2);
//System.out.printf("%.2f%% %.2f%% " + lemmaCount + " " + lemma + "\n", factor1, factor2);
System.out.printf("%.2f %d %.2f%% %.2f%% %s\n", avgFactor, lemmaCount, factor1, factor2, lemma);
}
}
}
private long count1(LuceneLanguageModel lm, String lemma, String firstPsSin, String thirdPsSin) {
return
lm.getCount(asList("mich", firstPsSin)) // "wenn ich mich schäme"
+ lm.getCount(asList("mich", lemma)) // "ich muss mich schämen"
//+ lm.getCount(asList("dich", sing2))
+ lm.getCount(asList("sich", thirdPsSin))
+ lm.getCount(asList("uns", lemma))
+ lm.getCount(asList("euch", lemma))
+ lm.getCount(asList("sich", lemma));
}
private long counterExamples(String term, LuceneLanguageModel lm, String lemma, String firstPsSin, String thirdPsSin) {<FILL_FUNCTION_BODY>}
private long count2(LuceneLanguageModel lm, String lemma, String firstPsSin, String thirdPsSin) {
return
lm.getCount(asList(firstPsSin, "mich")) // "schäme mich"
//+ lm.getCount(asList(sing2, "dich"))
+ lm.getCount(asList(thirdPsSin, "sich"))
+ lm.getCount(asList(lemma, "uns"))
//+ lm.getCount(asList(plu2, "euch"))
+ lm.getCount(asList(lemma, "sich"));
}
public static void main(String[] args) throws IOException {
if (args.length != 2) {
System.out.println("Usage: " + GermanReflexiveVerbGuesser.class.getName() + " <ngramDataIndex> <verbLemmaFile>");
System.exit(1);
}
String indexTopDir = args[0];
String lemmaListFile = args[1];
new GermanReflexiveVerbGuesser().run(new File(indexTopDir), new File(lemmaListFile));
}
}
|
return
lm.getCount(asList(term, "mich", firstPsSin)) // "für mich reklamiere"
+ lm.getCount(asList(term, "mich", lemma)) // "... für mich reklamieren"
+ lm.getCount(asList(term, "sich", thirdPsSin))
+ lm.getCount(asList(term, "uns", lemma))
+ lm.getCount(asList(term, "euch", lemma))
+ lm.getCount(asList(term, "sich", lemma));
| 1,214
| 156
| 1,370
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/bigdata/GermanSpellingReformGuesser.java
|
GermanSpellingReformGuesser
|
run
|
/**
 * Dev tool: given a lemma list, finds pairs of reformed/old German spellings that
 * differ by the 'f' vs. 'ph' spelling (e.g. "Delfin"/"Delphin") and prints them
 * as "newSpelling;oldSpelling".
 */
class GermanSpellingReformGuesser {

  public static void main(String[] args) throws IOException {
    if (args.length != 1) {
      System.out.println("Usage: " + GermanSpellingReformGuesser.class.getSimpleName() + " <lemmaList>");
      System.exit(1);
    }
    new GermanSpellingReformGuesser().run(args[0]);
  }

  /**
   * Prints each "newSpelling;oldSpelling" pair found in the lemma file to stdout
   * (deduplicated, unordered) and the number of pairs to stderr.
   * @param lemmaFile text file with one lemma per line
   */
  private void run(String lemmaFile) throws IOException {
    List<String> lines = Files.readAllLines(Paths.get(lemmaFile));
    Set<String> lemmas = new HashSet<>(lines);
    Set<String> result = new HashSet<>();
    for (String line : lines) {
      // old (pre-reform) spelling used 'ph' where the reformed spelling has 'f':
      String oldSpelling = line.replace("f", "ph");
      if (!oldSpelling.equals(line) && lemmas.contains(oldSpelling)) {
        result.add(line + ";" + oldSpelling);
      }
      // and the other direction, in case the list line is the old spelling:
      String newSpelling = line.replace("ph", "f");
      if (!newSpelling.equals(line) && lemmas.contains(newSpelling)) {
        result.add(newSpelling + ";" + line);
      }
    }
    for (String s : result) {
      System.out.println(s);
    }
    System.err.println(result.size() + " Paare gefunden");
  }
}
|
List<String> lines = Files.readAllLines(Paths.get(lemmaFile));
Set<String> lemmas = new HashSet<>(lines);
Set<String> result = new HashSet<>();
for (String line : lines) {
String oldSpelling1 = line.replace("ss", "ß");
/*if (!oldSpelling1.equals(line) && lemmas.contains(oldSpelling1)) {
result.add(line + ";" + oldSpelling1);
}
String newSpelling1 = line.replace("ß", "ss");
if (!newSpelling1.equals(line) && lemmas.contains(newSpelling1)) {
result.add(newSpelling1 + ";" + line);
}*/
String oldSpelling2 = line.replace("f", "ph");
if (!oldSpelling2.equals(line) && lemmas.contains(oldSpelling2)) {
result.add(line + ";" + oldSpelling2);
}
String newSpelling1 = line.replace("ph", "f");
if (!newSpelling1.equals(line) && lemmas.contains(newSpelling1)) {
result.add(newSpelling1 + ";" + line);
}
}
for (String s : result) {
System.out.println(s);
}
System.err.println(result.size() + " Paare gefunden");
| 134
| 363
| 497
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/bigdata/GermanUpperLowercaseWords.java
|
GermanUpperLowercaseWords
|
main
|
class GermanUpperLowercaseWords {
private GermanUpperLowercaseWords() {
}
@NotNull
private static Set<String> getUppercaseWords(List<String> lines) {
Set<String> uppercaseWords = new HashSet<>();
for (String line : lines) {
if (StringTools.startsWithUppercase(line)) {
uppercaseWords.add(line);
}
}
return uppercaseWords;
}
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
}
|
if (args.length != 1) {
System.out.println("Usage: " + GermanUpperLowercaseWords.class.getSimpleName() + " <wordList>");
System.exit(1);
}
List<String> lines = Files.readAllLines(Paths.get(args[0]));
Set<String> uppercaseWords = getUppercaseWords(lines);
for (String line : lines) {
String uppercased = StringTools.uppercaseFirstChar(line);
if (!StringTools.startsWithUppercase(line) && uppercaseWords.contains(uppercased)) {
System.out.println(line + "; " + uppercased);
}
}
| 155
| 183
| 338
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/bigdata/GermanUppercasePhraseFinder.java
|
GermanUppercasePhraseFinder
|
isRelevant
|
class GermanUppercasePhraseFinder {
private static final long MIN_TERM_LEN = 4;
private static final long LIMIT = 500;
private GermanUppercasePhraseFinder() {
}
public static void main(String[] args) throws IOException {
if (args.length != 1) {
System.out.println("Usage: " + GermanUppercasePhraseFinder.class.getSimpleName() + " <ngramIndexDir>");
System.exit(1);
}
JLanguageTool lt = new JLanguageTool(Languages.getLanguageForShortCode("de"));
FSDirectory fsDir = FSDirectory.open(new File(args[0]).toPath());
IndexReader reader = DirectoryReader.open(fsDir);
IndexSearcher searcher = new IndexSearcher(reader);
Fields fields = MultiFields.getFields(reader);
Terms terms = fields.terms("ngram");
TermsEnum termsEnum = terms.iterator();
int count = 0;
BytesRef next;
while ((next = termsEnum.next()) != null) {
String term = next.utf8ToString();
count++;
//term = "persischer Golf"; // for testing
String[] parts = term.split(" ");
boolean useful = true;
int lcCount = 0;
List<String> ucParts = new ArrayList<>();
for (String part : parts) {
if (part.length() < MIN_TERM_LEN) {
useful = false;
break;
}
String uc = StringTools.uppercaseFirstChar(part);
if (!part.equals(uc)) {
lcCount++;
}
ucParts.add(uc);
}
if (!useful || lcCount == 0 || lcCount == 2) {
continue;
}
String uppercase = String.join(" ", ucParts);
if (term.equals(uppercase)){
continue;
}
long thisCount = getOccurrenceCount(reader, searcher, term);
long thisUpperCount = getOccurrenceCount(reader, searcher, uppercase);
if (count % 10_000 == 0) {
System.err.println(count + " @ " + term);
}
if (thisCount > LIMIT || thisUpperCount > LIMIT) {
if (thisUpperCount > thisCount) {
if (isRelevant(lt, term)) {
float factor = (float)thisUpperCount / thisCount;
System.out.printf("%.2f " + thisUpperCount + " " + uppercase + " " + thisCount + " " + term + "\n", factor);
}
}
}
}
}
private static boolean isRelevant(JLanguageTool lt, String term) throws IOException {<FILL_FUNCTION_BODY>}
private static long getOccurrenceCount(IndexReader reader, IndexSearcher searcher, String term) throws IOException {
TopDocs topDocs = searcher.search(new TermQuery(new Term("ngram", term)), 5);
if (topDocs.totalHits == 0) {
return 0;
}
int docId = topDocs.scoreDocs[0].doc;
Document document = reader.document(docId);
return Long.parseLong(document.get("count"));
}
}
|
AnalyzedSentence analyzedSentence = lt.analyzeText(term).get(0);
AnalyzedTokenReadings[] tokens = analyzedSentence.getTokensWithoutWhitespace();
if (tokens.length == 1+2) { // 1 is for sentence start
if (tokens[1].hasPartialPosTag("ADJ:") && tokens[2].hasPartialPosTag("SUB:")) {
return true;
}
}
return false;
| 868
| 120
| 988
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/bigdata/LargestNGramFinder.java
|
LargestNGramFinder
|
main
|
class LargestNGramFinder {
private LargestNGramFinder() {
}
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
}
|
if (args.length != 1) {
System.out.println("Usage: " + LargestNGramFinder.class.getSimpleName() + " <ngramIndexDir>");
System.exit(1);
}
FSDirectory fsDir = FSDirectory.open(new File(args[0]).toPath());
IndexReader reader = DirectoryReader.open(fsDir);
IndexSearcher searcher = new IndexSearcher(reader);
Fields fields = MultiFields.getFields(reader);
long max = 0;
String maxTerm = "";
Terms terms = fields.terms("ngram");
TermsEnum termsEnum = terms.iterator();
int count = 0;
BytesRef next;
while ((next = termsEnum.next()) != null) {
String term = next.utf8ToString();
TopDocs topDocs = searcher.search(new TermQuery(new Term("ngram", term)), 5);
int docId = topDocs.scoreDocs[0].doc;
Document document = reader.document(docId);
long thisCount = Long.parseLong(document.get("count"));
if (max < thisCount) {
max = thisCount;
maxTerm = term;
}
if (count % 10_000 == 0) {
System.out.println(count + " -> " + topDocs.totalHits + " for " + term + " -> " + thisCount + ", max so far: " + max + " for '" + maxTerm + "'");
}
count++;
}
System.out.println("Max: " + max + " for " + maxTerm);
| 57
| 418
| 475
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/bigdata/LuceneIndexExporter.java
|
LuceneIndexExporter
|
main
|
class LuceneIndexExporter {
private static final String FIELD_NAME = "fieldLowercase";
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
}
|
if (args.length != 1) {
System.out.println("Usage: " + LuceneIndexExporter.class.getSimpleName() + " <luceneIndexDir>");
System.exit(1);
}
System.out.println("Using field: " + FIELD_NAME);
Directory directory = SimpleFSDirectory.open(Paths.get(args[0]));
try (DirectoryReader indexReader = DirectoryReader.open(directory)) {
for (int i = 0; i < indexReader.maxDoc(); i++) {
Document doc = indexReader.document(i);
System.out.println(doc.get(FIELD_NAME));
}
}
| 54
| 172
| 226
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/bigdata/LuceneSimpleIndexCreator.java
|
LuceneSimpleIndexCreator
|
main
|
class LuceneSimpleIndexCreator {
private LuceneSimpleIndexCreator() {}
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
private static void addDoc(IndexWriter iw, String ngram, int count) throws IOException {
Document document = new Document();
document.add(new TextField("ngram", ngram, Field.Store.YES));
document.add(new TextField("count", String.valueOf(count), Field.Store.YES));
iw.addDocument(document);
}
}
|
IndexWriterConfig conf = new IndexWriterConfig(new KeywordAnalyzer());
try (IndexWriter iw1 = new IndexWriter(FSDirectory.open(new File("/tmp/1grams").toPath()), conf)) {
addDoc(iw1, "the", 55);
addDoc(iw1, "nice", 10);
addDoc(iw1, "building", 1);
Document document = new Document();
document.add(new TextField("totalTokenCount", String.valueOf(3), Field.Store.YES));
iw1.addDocument(document);
}
IndexWriterConfig conf2 = new IndexWriterConfig(new KeywordAnalyzer());
try (IndexWriter iw2 = new IndexWriter(FSDirectory.open(new File("/tmp/2grams").toPath()), conf2)) {
addDoc(iw2, "the nice", 3);
addDoc(iw2, "nice building", 2);
}
IndexWriterConfig conf3 = new IndexWriterConfig(new KeywordAnalyzer());
try (IndexWriter iw3 = new IndexWriter(FSDirectory.open(new File("/tmp/3grams").toPath()), conf3)) {
addDoc(iw3, "the nice building", 1);
}
| 145
| 323
| 468
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/bigdata/NGramLookup.java
|
NGramLookup
|
main
|
class NGramLookup {
private NGramLookup() {
}
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
}
|
if (args.length < 2) {
System.out.println("Usage: " + NGramLookup.class.getName() + " <ngram...|file> <ngramDataIndex>");
System.out.println(" Example: " + NGramLookup.class.getName() + " \"my house\" /data/ngram-index");
System.out.println(" Example: " + NGramLookup.class.getName() + " /tmp/words.txt /data/ngram-index");
System.exit(1);
}
String indexTopDir = args[args.length-1];
try (LuceneLanguageModel lm = new LuceneLanguageModel(new File(indexTopDir))) {
double totalP = 1;
File maybeFile = new File(args[0]);
if (args.length == 2 && maybeFile.isFile()) {
List<String> lines = Files.readAllLines(maybeFile.toPath());
for (String line : lines) {
long count = lm.getCount(line);
System.out.println(count + "\t" + line);
}
} else {
for (int i = 0; i < args.length -1; i++) {
String[] lookup = args[i].split(" ");
long count = lm.getCount(Arrays.asList(lookup));
Probability p = lm.getPseudoProbability(Arrays.asList(lookup));
System.out.println(Arrays.toString(lookup) + " -> count:" + count + ", " + p + ", log:" + Math.log(p.getProb()));
totalP *= p.getProb();
}
System.out.println("totalP=" + totalP);
}
}
| 51
| 442
| 493
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/bigdata/NGramUrlGenerator.java
|
NGramUrlGenerator
|
main
|
class NGramUrlGenerator {
private NGramUrlGenerator() {}
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
public static void mainDownloadSome(String[] args) throws IOException {
ConfusionSetLoader confusionSetLoader = new ConfusionSetLoader(new AmericanEnglish());
InputStream inputStream = JLanguageTool.getDataBroker().getFromResourceDirAsStream("/en/homophones.txt");
Map<String,List<ConfusionPair>> map = confusionSetLoader.loadConfusionPairs(inputStream);
String url = "http://storage.googleapis.com/books/ngrams/books/googlebooks-eng-all-2gram-20120701-<XX>.gz";
Set<String> nameSet = new HashSet<>();
for (String s : map.keySet()) {
if (s.length() < 2) {
nameSet.add(s.substring(0, 1).toLowerCase() + "_");
} else {
nameSet.add(s.substring(0, 2).toLowerCase());
}
}
List<String> nameList = new ArrayList<>(nameSet);
Collections.sort(nameList);
for (String name : nameList) {
System.out.println(url.replace("<XX>", name));
}
System.err.println("Number of files: " + nameList.size());
}
}
|
String url = "http://storage.googleapis.com/books/ngrams/books/googlebooks-eng-all-4gram-20120701-<XX>.gz";
String chars = "abcdefghijklmnopqrstuvwxyz";
String chars2 = "abcdefghijklmnopqrstuvwxyz_";
for (int i = 0; i <= 9; i++) {
System.out.println(url.replace("<XX>", String.valueOf(i)));
}
for (int i = 0; i < chars.length(); i++) {
for (int j = 0; j < chars2.length(); j++) {
String name = String.valueOf(chars.charAt(i)) + String.valueOf(chars2.charAt(j));
System.out.println(url.replace("<XX>", name));
}
}
System.out.println(url.replace("<XX>", "punctuation"));
| 365
| 258
| 623
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/bigdata/NeededNGramCounter.java
|
NeededNGramCounter
|
main
|
class NeededNGramCounter {
private static final String LANG = "en";
private NeededNGramCounter() {
}
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
}
|
if (args.length != 1) {
System.out.println("Usage: " + NeededNGramCounter.class.getSimpleName() + " <ngramIndexDir>");
System.exit(1);
}
Language lang = Languages.getLanguageForShortCode(LANG);
String path = "/" + lang.getShortCode() + "/confusion_sets.txt";
Set<String> ngrams;
try (InputStream confSetStream = JLanguageTool.getDataBroker().getFromResourceDirAsStream(path)) {
ngrams = new ConfusionSetLoader(new AmericanEnglish()).loadConfusionPairs(confSetStream).keySet();
}
String ngramIndexDir = args[0];
FSDirectory fsDir = FSDirectory.open(new File(ngramIndexDir).toPath());
IndexReader reader = DirectoryReader.open(fsDir);
Fields fields = MultiFields.getFields(reader);
Terms terms = fields.terms("ngram");
TermsEnum termsEnum = terms.iterator();
int i = 0;
int needed = 0;
int notNeeded = 0;
BytesRef next;
while ((next = termsEnum.next()) != null) {
String term = next.utf8ToString();
String[] tmpTerms = term.split(" ");
boolean ngramNeeded = false;
for (String tmpTerm : tmpTerms) {
if (ngrams.contains(tmpTerm)) {
ngramNeeded = true;
break;
}
}
if (ngramNeeded) {
//System.out.println("needed: " + term);
needed++;
} else {
//System.out.println("not needed: " + term);
notNeeded++;
}
if (i % 500_000 == 0) {
System.out.println(i + "/" + terms.getDocCount());
}
i++;
}
System.out.println("language : " + LANG);
System.out.println("ngram index : " + ngramIndexDir);
System.out.println("needed ngrams : " + needed);
System.out.println("not needed ngrams: " + notNeeded);
| 66
| 570
| 636
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/bigdata/OccurrenceAdder.java
|
OccurrenceAdder
|
main
|
/**
 * Dev tool: sums per-word occurrence counts over a directory of gzipped
 * Google-ngram-style files ("word \t year \t occurrences \t ..." per line)
 * for the words of a given word list, and prints "count \t word" pairs.
 */
class OccurrenceAdder {

  private static final int BUFFER_SIZE = 16384;

  // Accumulates counts from every file in 'dir' into 'map'.
  private void run(Map<String, Integer> map, File dir) throws IOException {
    File[] files = dir.listFiles();
    if (files == null) {
      // listFiles() returns null for non-directories / IO errors; fail with a clear message
      throw new IOException("Could not list files, not a readable directory: " + dir);
    }
    for (File file : files) {
      runOnFile(map, file);
    }
  }

  // Reads one gzipped file line by line and adds the occurrence column (index 2)
  // to the map entry of the word column (index 0), ignoring words not in the map.
  private void runOnFile(Map<String, Integer> map, File file) throws IOException {
    System.out.println("Working on " + file);
    try (
      InputStream fileStream = new FileInputStream(file);
      InputStream gzipStream = new GZIPInputStream(fileStream, BUFFER_SIZE);
      Reader decoder = new InputStreamReader(gzipStream, "utf-8");
      BufferedReader buffered = new BufferedReader(decoder, BUFFER_SIZE)
    ) {
      String line;
      while ((line = buffered.readLine()) != null) {
        String[] parts = line.split("\t");
        String word = parts[0];
        int occurrences = Integer.parseInt(parts[2]);
        Integer val = map.get(word);
        if (val != null) {   // only words from the word list are tracked
          map.put(word, val + occurrences);
        }
      }
    }
  }

  /**
   * @param args [0] = word list file (one word per line), [1] = directory of .gz data files
   */
  public static void main(String[] args) throws IOException {
    if (args.length != 2) {
      System.out.println("Usage: " + OccurrenceAdder.class.getName() + " <wordfile> <dir>");
      System.exit(1);
    }
    OccurrenceAdder occurrenceAdder = new OccurrenceAdder();
    Map<String, Integer> map = new HashMap<>();
    // Files.readAllLines closes the file and decodes UTF-8 — the previous
    // IOUtils.readLines(new FileInputStream(...)) leaked the stream:
    List<String> words = java.nio.file.Files.readAllLines(java.nio.file.Paths.get(args[0]));
    for (String word : words) {
      map.put(word, 0);
    }
    occurrenceAdder.run(map, new File(args[1]));
    System.out.println("-------------------------");
    for (Map.Entry<String, Integer> entry : map.entrySet()) {
      System.out.println(entry.getValue() + "\t" + entry.getKey());
    }
  }
}
|
if (args.length != 2) {
System.out.println("Usage: " + OccurrenceAdder.class.getName() + " <wordfile> <dir>");
System.exit(1);
}
OccurrenceAdder occurrenceAdder = new OccurrenceAdder();
Map<String, Integer> map = new HashMap<>();
List<String> words = IOUtils.readLines(new FileInputStream(args[0]));
for (String word : words) {
map.put(word, 0);
}
occurrenceAdder.run(map, new File(args[1]));
System.out.println("-------------------------");
for (Map.Entry<String, Integer> entry : map.entrySet()) {
System.out.println(entry.getValue() + "\t" + entry.getKey());
}
| 342
| 212
| 554
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/bigdata/TextIndexCreator.java
|
TextIndexCreator
|
indexFile
|
class TextIndexCreator {
private void index(File outputDir, String[] inputFiles) throws IOException {
Analyzer analyzer = new StandardAnalyzer(CharArraySet.EMPTY_SET);
IndexWriterConfig config = new IndexWriterConfig(analyzer);
try (FSDirectory directory = FSDirectory.open(outputDir.toPath());
IndexWriter indexWriter = new IndexWriter(directory, config)) {
for (String inputFile : inputFiles) {
indexFile(indexWriter, inputFile);
}
}
}
private void indexFile(IndexWriter indexWriter, String inputFile) throws IOException {<FILL_FUNCTION_BODY>}
public static void main(String[] args) throws IOException {
if (args.length < 2) {
System.out.println("Usage: " + TextIndexCreator.class.getSimpleName() + " <outputDir> <inputFile...>");
System.exit(0);
}
TextIndexCreator creator = new TextIndexCreator();
File outputDir = new File(args[0]);
if (outputDir.exists()) {
throw new RuntimeException("Output directory already exists: " + outputDir);
}
creator.index(outputDir, Arrays.copyOfRange(args, 1, args.length));
}
}
|
System.out.println("Indexing " + inputFile);
int lineCount = 0;
try (Scanner scanner = new Scanner(new File(inputFile))) {
while (scanner.hasNextLine()) {
String line = scanner.nextLine();
Document doc = new Document();
doc.add(new TextField(Lucene.FIELD_NAME, line, Field.Store.YES));
doc.add(new TextField(Lucene.FIELD_NAME_LOWERCASE, line.toLowerCase(), Field.Store.YES));
indexWriter.addDocument(doc);
if (++lineCount % 10_000 == 0) {
System.out.println(lineCount + "...");
}
}
}
| 328
| 190
| 518
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/diff/DiffTools.java
|
DiffTools
|
getSubId
|
/**
 * Helpers for splitting full rule ids like {@code "RULE_ID[3]"} into their
 * master id ({@code "RULE_ID"}) and sub id ({@code "3"}).
 */
class DiffTools {

  private final static Pattern subIdPattern = Pattern.compile("\\[(\\d+)\\]");

  private DiffTools() {
  }

  /** Returns the rule id with the first "[n]" sub id removed and trimmed, e.g. "ID[3]" -&gt; "ID". */
  static String getMasterId(String ruleId) {
    return ruleId.replaceFirst("\\[\\d+\\]", "").trim();
  }

  /**
   * Returns the numeric sub id of the rule id (e.g. "3" for "ID[3]"),
   * or {@code null} if the rule id has no "[n]" part.
   */
  static String getSubId(String ruleId) {
    Matcher matcher = subIdPattern.matcher(ruleId);
    if (matcher.find()) {
      return matcher.group(1);
    } else {
      return null;
    }
  }
}
|
Matcher matcher = subIdPattern.matcher(ruleId);
if (matcher.find()) {
return matcher.group(1);
} else {
return null;
}
| 114
| 53
| 167
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/diff/LightRuleMatch.java
|
LightRuleMatch
|
toString
|
class LightRuleMatch {
enum Status {
temp_off, on
}
private final int line;
private final int column;
private final String fullRuleId;
private final String message;
private final String category;
private final String context;
private final String coveredText;
private final List<String> suggestions;
private final String ruleSource; // e.g. grammar.xml
private final String title;
private final Status status;
private final List<String> tags;
private final boolean isPremium;
LightRuleMatch(int line, int column, String ruleId, String message, String category, String context, String coveredText,
List<String> suggestions, String ruleSource, String title, Status status, List<String> tags, boolean isPremium) {
this.line = line;
this.column = column;
this.fullRuleId = Objects.requireNonNull(ruleId);
this.message = Objects.requireNonNull(message);
this.category = Objects.requireNonNull(category);
this.context = Objects.requireNonNull(context);
this.coveredText = Objects.requireNonNull(coveredText);
this.suggestions = suggestions == null ? Arrays.asList() : suggestions;
this.ruleSource = ruleSource;
this.title = title;
this.status = Objects.requireNonNull(status);
this.tags = Objects.requireNonNull(tags);
this.isPremium = isPremium;
}
int getLine() {
return line;
}
int getColumn() {
return column;
}
String getFullRuleId() {
return fullRuleId;
}
String getRuleId() {
return DiffTools.getMasterId(fullRuleId);
}
@Nullable
String getSubId() {
return DiffTools.getSubId(fullRuleId);
}
String getMessage() {
return message;
}
String getCategoryName() {
return category;
}
String getContext() {
return context;
}
String getCoveredText() {
return coveredText;
}
List<String> getSuggestions() {
return suggestions;
}
String getRuleSource() {
return ruleSource;
}
String getTitle() {
return title;
}
Status getStatus() {
return status;
}
List<String> getTags() {
return tags;
}
boolean isPremium() {
return isPremium;
}
@Override
public String toString() {<FILL_FUNCTION_BODY>}
}
|
return line + "/" + column +
" " + getRuleId() + "[" + getSubId() + "]" +
", msg=" + message +
", covered=" + coveredText +
", suggestions=" + suggestions +
", title=" + title +
//", status=" + status +
", ctx=" + context;
| 693
| 90
| 783
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/diff/MatchKey.java
|
MatchKey
|
toString
|
class MatchKey {
private final int line;
private final int column;
private final String ruleId;
private final String subId; // not considered in equals
private final String title;
private final String coveredText;
MatchKey(int line, int column, String ruleId, String title, String coveredText) {
this.line = line;
this.column = column;
this.ruleId = DiffTools.getMasterId(ruleId);
this.subId = DiffTools.getSubId(ruleId);
this.title = title.trim();
this.coveredText = Objects.requireNonNull(coveredText);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
MatchKey matchKey = (MatchKey) o;
return line == matchKey.line &&
column == matchKey.column &&
ruleId.equals(matchKey.ruleId) &&
Objects.equals(title, matchKey.title) &&
coveredText.equals(matchKey.coveredText);
}
@Override
public int hashCode() {
return Objects.hash(line, column, ruleId, title, coveredText);
}
@Override
public String toString() {<FILL_FUNCTION_BODY>}
}
|
return "MatchKey{" +
"line=" + line +
", column=" + column +
", ruleId='" + ruleId + '\'' +
//", (subId='" + subId + '\'' +
", title='" + title + '\'' +
", coveredText='" + coveredText + '\'' +
'}';
| 351
| 90
| 441
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/diff/RuleMatchDiff.java
|
RuleMatchDiff
|
toString
|
class RuleMatchDiff {
private final Status status;
private final LightRuleMatch oldMatch;
private final LightRuleMatch newMatch;
private final LightRuleMatch replacedBy; // the added match that a removed match (maybe) was replaced by
private LightRuleMatch replaces; // the removed match that this match (maybe) replaces
enum Status {
ADDED, REMOVED, MODIFIED
}
static RuleMatchDiff added(LightRuleMatch newMatch) {
return new RuleMatchDiff(Status.ADDED, null, newMatch, null);
}
static RuleMatchDiff removed(LightRuleMatch oldMatch) {
return new RuleMatchDiff(Status.REMOVED, oldMatch, null, null);
}
static RuleMatchDiff removed(LightRuleMatch oldMatch, LightRuleMatch replacedBy) {
return new RuleMatchDiff(Status.REMOVED, oldMatch, null, replacedBy);
}
static RuleMatchDiff modified(LightRuleMatch oldMatch, LightRuleMatch newMatch) {
return new RuleMatchDiff(Status.MODIFIED, oldMatch, newMatch, null);
}
private RuleMatchDiff(Status status, LightRuleMatch oldMatch, LightRuleMatch newMatch, LightRuleMatch replacedBy) {
this.status = Objects.requireNonNull(status);
this.oldMatch = oldMatch;
this.newMatch = newMatch;
this.replacedBy = replacedBy;
}
Status getStatus() {
return status;
}
String getMarkedText() {
return newMatch == null ? oldMatch.getCoveredText() : newMatch.getCoveredText();
}
@Nullable
LightRuleMatch getOldMatch() {
return oldMatch;
}
@Nullable
LightRuleMatch getNewMatch() {
return newMatch;
}
LightRuleMatch getReplacedBy() {
return replacedBy;
}
void setReplaces(LightRuleMatch oldMatch) {
replaces = oldMatch;
}
LightRuleMatch getReplaces() {
return replaces;
}
@Override
public String toString() {<FILL_FUNCTION_BODY>}
}
|
return status +
": oldMatch=" + oldMatch +
", newMatch=" + newMatch;
| 557
| 31
| 588
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/errorcorpus/Error.java
|
Error
|
getAppliedCorrection
|
/**
 * An expected error in an error-corpus sentence: the start/end positions refer
 * to the markup text, {@code correction} is the text that should replace them.
 */
class Error {

  private final int startPos;
  private final int endPos;
  private final String correction;

  Error(int startPos, int endPos, String correction) {
    if (endPos < startPos) {
      throw new RuntimeException("end pos < start pos: " + endPos + " < " + startPos);
    }
    this.startPos = startPos;
    this.endPos = endPos;
    this.correction = correction;
  }

  public int getStartPos() {
    return startPos;
  }

  public int getEndPos() {
    return endPos;
  }

  public String getCorrection() {
    return correction;
  }

  /**
   * Applies this error's correction to the given markup text and strips all
   * {@code <...>} markup from the result.
   * @param markupText the sentence including markup
   * @return the corrected sentence without markup
   * @throws RuntimeException if the positions don't fit the given text
   */
  public String getAppliedCorrection(String markupText) {
    try {
      String correctionApplied = markupText.substring(0, startPos) + correction + markupText.substring(endPos);
      return correctionApplied.replaceAll("<.*?>", "");
    } catch (Exception e) {
      // keep the original exception as cause so the actual failure isn't lost:
      throw new RuntimeException("Could not get substrings 0-" + startPos + " and " + endPos + "-end: " + markupText, e);
    }
  }

  @Override
  public String toString() {
    return startPos + "-" + endPos + ":" + correction;
  }
}
|
try {
String correctionApplied = markupText.substring(0, startPos) + correction + markupText.substring(endPos);
return correctionApplied.replaceAll("<.*?>", "");
} catch (Exception e) {
throw new RuntimeException("Could not get substrings 0-" + startPos + " and " + endPos + "-end: " + markupText);
}
| 237
| 103
| 340
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/errorcorpus/ErrorSentence.java
|
ErrorSentence
|
hasErrorOverlappingWithMatch
|
class ErrorSentence {
private final String markupText;
private final AnnotatedText annotatedText;
private final List<Error> errors;
ErrorSentence(String markupText, AnnotatedText annotatedText, List<Error> errors) {
this.markupText = markupText;
this.annotatedText = annotatedText;
this.errors = errors;
}
public boolean hasErrorCoveredByMatchAndGoodFirstSuggestion(RuleMatch match) {
if (hasErrorCoveredByMatch(match)) {
List<String> suggestion = match.getSuggestedReplacements();
if (suggestion.size() > 0) {
String firstSuggestion = suggestion.get(0);
for (Error error : errors) {
// The correction from AtD might be "an hour", whereas the error might just span the wrong "a",
// so we just apply the suggestion and see if what we get is the perfect result as specified
// by the corpus:
String correctedByCorpus = error.getAppliedCorrection(markupText);
String correctedByRuleMarkup = markupText.substring(0, match.getFromPos()) +
match.getSuggestedReplacements().get(0) + markupText.substring(match.getToPos());
String correctedByRule = correctedByRuleMarkup.replaceAll("<.*?>", "");
if (correctedByRule.equals(correctedByCorpus)) {
return true;
}
if (error.getCorrection().equalsIgnoreCase(firstSuggestion)) {
return true;
}
}
}
}
return false;
}
public boolean hasErrorCoveredByMatch(RuleMatch match) {
for (Error error : errors) {
if (match.getFromPos() <= error.getStartPos() && match.getToPos() >= error.getEndPos()) {
return true;
}
}
return false;
}
/** @since 3.2 */
public boolean hasErrorOverlappingWithMatch(RuleMatch match) {<FILL_FUNCTION_BODY>}
public String getMarkupText() {
return markupText;
}
public AnnotatedText getAnnotatedText() {
return annotatedText;
}
public List<Error> getErrors() {
return errors;
}
@Override
public String toString() {
return markupText;
}
}
|
for (Error error : errors) {
if (match.getFromPos() <= error.getStartPos() && match.getToPos() >= error.getEndPos() ||
match.getFromPos() >= error.getStartPos() && match.getFromPos() <= error.getEndPos() ||
match.getToPos() >= error.getStartPos() && match.getToPos() <= error.getEndPos()) {
return true;
}
}
return false;
| 629
| 121
| 750
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/errorcorpus/PedlerCorpus.java
|
PedlerCorpus
|
getIncorrectSentence
|
class PedlerCorpus implements ErrorCorpus {
private static final String NORMALIZE_REGEX = "\\s*<ERR targ\\s*=\\s*([^>]*?)\\s*>\\s*(.*?)\\s*</ERR>\\s*";
private final List<String> lines = new ArrayList<>();
private int pos;
public PedlerCorpus(File dir) throws IOException {
File[] files = dir.listFiles();
if (files == null) {
throw new RuntimeException("Directory not found or is not a directory: " + dir);
}
for (File file : files) {
if (!file.getName().endsWith(".txt")) {
System.out.println("Ignoring " + file + ", does not match *.txt");
continue;
}
try (FileInputStream fis = new FileInputStream(file)) {
lines.addAll(IOUtils.readLines(fis));
}
}
}
@Override
public Iterator<ErrorSentence> iterator() {
return new Iterator<ErrorSentence>() {
@Override
public boolean hasNext() {
return pos < lines.size();
}
@Override
public ErrorSentence next() {
String line = lines.get(pos++);
ErrorSentence sentence = getIncorrectSentence(line);
return sentence;
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
private ErrorSentence getIncorrectSentence(String line) {<FILL_FUNCTION_BODY>}
private AnnotatedText makeAnnotatedText(String pseudoXml) {
AnnotatedTextBuilder builder = new AnnotatedTextBuilder();
StringTokenizer tokenizer = new StringTokenizer(pseudoXml, "<>", true);
boolean inMarkup = false;
while (tokenizer.hasMoreTokens()) {
String part = tokenizer.nextToken();
if (part.startsWith("<")) {
builder.addMarkup(part);
inMarkup = true;
} else if (part.startsWith(">")) {
inMarkup = false;
builder.addMarkup(part);
} else {
if (inMarkup) {
builder.addMarkup(part);
} else {
builder.addText(part);
}
}
}
return builder.build();
}
}
|
String normalized = line.replaceAll(NORMALIZE_REGEX, " <ERR targ=$1>$2</ERR> ").replaceAll("\\s+", " ").trim();
List<Error> errors = new ArrayList<>();
int startPos = 0;
while (normalized.indexOf("<ERR targ=", startPos) != -1) {
int startTagStart = normalized.indexOf("<ERR targ=", startPos);
int startTagEnd = normalized.indexOf(">", startTagStart);
int endTagStart = normalized.indexOf("</ERR>", startTagStart);
int correctionEnd = normalized.indexOf(">", startTagStart);
String correction = normalized.substring(startTagStart + "<ERR targ=".length(), correctionEnd);
errors.add(new Error(startTagEnd + 1, endTagStart, correction));
startPos = startTagStart + 1;
}
return new ErrorSentence(normalized, makeAnnotatedText(normalized), errors);
| 627
| 249
| 876
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/errorcorpus/SimpleCorpus.java
|
SimpleCorpus
|
getIncorrectSentence
|
class SimpleCorpus implements ErrorCorpus {
private final List<String> lines = new ArrayList<>();
private int pos;
public SimpleCorpus(File simpleTextFile) throws IOException {
try (FileInputStream fis = new FileInputStream(simpleTextFile)) {
lines.addAll(IOUtils.readLines(fis).stream().filter(line -> line.matches("\\d+\\..*")).collect(Collectors.toList()));
}
System.out.println("Loaded " + lines.size() + " example sentences");
}
@Override
public Iterator<ErrorSentence> iterator() {
return new Iterator<ErrorSentence>() {
@Override
public boolean hasNext() {
return pos < lines.size();
}
@Override
public ErrorSentence next() {
String line = lines.get(pos++);
return getIncorrectSentence(line);
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
private ErrorSentence getIncorrectSentence(String line) {<FILL_FUNCTION_BODY>}
private AnnotatedText makeAnnotatedText(String text) {
AnnotatedTextBuilder builder = new AnnotatedTextBuilder();
builder.addText(text.replace("_", " ").replaceAll("\\s+", " "));
return builder.build();
}
}
|
String normalized = line.replaceFirst("\\d+\\.\\s*", "");
String normalizedNoCorrection = normalized.replaceFirst("=>.*", "").trim();
int startError = normalized.indexOf('_');
int endError = normalized.indexOf('_', startError+1);
if (startError == -1 || endError == -1) {
throw new RuntimeException("No '_..._' marker found: " + line);
}
int startCorrectionMarker = normalized.indexOf("=>");
if (startCorrectionMarker == -1) {
throw new RuntimeException("No '=>' marker found: " + line);
}
String correction = normalized.substring(startCorrectionMarker + "=>".length());
List<Error> errors = Arrays.asList(new Error(startError + 1, endError - 1, correction));
return new ErrorSentence(normalizedNoCorrection, makeAnnotatedText(normalizedNoCorrection), errors);
| 368
| 237
| 605
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/eval/AfterTheDeadlineEvaluator.java
|
AfterTheDeadlineEvaluator
|
run
|
class AfterTheDeadlineEvaluator {
private static final int WAIT_TIME_MILLIS = 1000;
private final String urlPrefix;
AfterTheDeadlineEvaluator(String urlPrefix) {
this.urlPrefix = urlPrefix;
}
private void run(Language lang) throws IOException, InterruptedException {<FILL_FUNCTION_BODY>}
private List<Rule> getRules(Language lang) throws IOException {
JLanguageTool lt = new JLanguageTool(lang);
return lt.getAllActiveRules();
}
private boolean queryAtDServer(IncorrectExample example) {
String sentence = ExampleSentence.cleanMarkersInExample(example.getExample());
try {
URL url = new URL(urlPrefix + URLEncoder.encode(sentence, "UTF-8"));
String result = getContent(url);
if (isExpectedErrorFound(example, result)) {
return true;
}
} catch (Exception e) {
throw new RuntimeException(e);
}
return false;
}
private String getContent(URL url) throws IOException {
final InputStream contentStream = (InputStream) url.getContent();
return StringTools.streamToString(contentStream, "UTF-8");
}
boolean isExpectedErrorFound(IncorrectExample incorrectExample, String resultXml) throws XPathExpressionException {
String example = incorrectExample.getExample();
Document document = getDocument(resultXml);
XPath xPath = XPathFactory.newInstance().newXPath();
NodeList errorStrings = (NodeList)xPath.evaluate("//string/text()", document, XPathConstants.NODESET);
for (int i = 0; i < errorStrings.getLength(); i++) {
String errorStr = errorStrings.item(i).getNodeValue();
if (errorStr.isEmpty()) {
continue;
}
List<Integer> errorStartPosList = getStartPositions(incorrectExample, errorStr);
List<String> mismatches = new ArrayList<>();
for (Integer errorStartPos : errorStartPosList) {
int errorEndPos = errorStartPos + errorStr.length();
int expectedErrorStartPos = example.indexOf("<marker>");
int expectedErrorEndPos = errorStartPos + errorStr.length();
if (errorStartPos == expectedErrorStartPos && errorEndPos == expectedErrorEndPos) {
return true;
} else {
mismatches.add("Position mismatch: " + errorStartPos + "-" + errorEndPos + " != " + expectedErrorStartPos + "-" + expectedErrorEndPos);
}
}
for (String mismatch : mismatches) {
System.out.println(" " + mismatch);
}
}
return false;
}
private List<Integer> getStartPositions(IncorrectExample example, String searchStr) {
List<Integer> posList = new ArrayList<>();
int pos = 0;
String sentence = ExampleSentence.cleanMarkersInExample(example.getExample());
while ((pos = sentence.indexOf(searchStr, pos)) != -1) {
posList.add(pos);
pos++;
}
return posList;
}
private Document getDocument(String xml) {
try {
DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
DocumentBuilder builder = factory.newDocumentBuilder();
InputSource inputSource = new InputSource(new StringReader(xml));
return builder.parse(inputSource);
} catch (Exception e) {
throw new RuntimeException("Could not parse XML: " + xml);
}
}
public static void main(String[] args) throws Exception {
if (args.length != 2) {
System.err.println("Usage: " + AfterTheDeadlineEvaluator.class.getSimpleName() + " <langCode> <urlPrefix>");
System.err.println(" <urlPrefix> After the Deadline instance, e.g. 'http://de.service.afterthedeadline.com/checkDocument?key=test&data='");
System.exit(1);
}
AfterTheDeadlineEvaluator evaluator = new AfterTheDeadlineEvaluator(args[1]);
evaluator.run(Languages.getLanguageForShortCode(args[0]));
}
}
|
List<Rule> rules = getRules(lang);
int sentenceCount = 0;
int errorFoundCount = 0;
System.out.println("Starting test for " + lang.getName() + " on " + urlPrefix);
System.out.println("Wait time between HTTP requests: " + WAIT_TIME_MILLIS + "ms");
System.out.println("Starting test on " + rules.size() + " rules");
for (Rule rule : rules) {
if (rule.isDefaultOff()) {
System.out.println("Skipping rule that is off by default: " + rule.getId());
continue;
}
List<IncorrectExample> incorrectExamples = rule.getIncorrectExamples();
System.out.println("\n" + rule.getId() + ":");
if (incorrectExamples.isEmpty()) {
System.out.println(" (no examples)");
continue;
}
for (IncorrectExample example : incorrectExamples) {
boolean match = queryAtDServer(example);
sentenceCount++;
if (match) {
errorFoundCount++;
}
String marker = match ? "+" : "-";
System.out.println(" [" + marker + "] " + example.getExample().replace("<marker>", "<m>").replace("</marker>", "</m>"));
Thread.sleep(WAIT_TIME_MILLIS);
}
//use this to stop: if (sentenceCount > 100) { break; }
}
System.out.println("\nDone.");
System.out.println("Sentence count: " + sentenceCount);
float percentage = (float)errorFoundCount / sentenceCount * 100;
System.out.printf("Expected errors found: " + errorFoundCount + " (%.2f%%)\n", percentage);
| 1,092
| 463
| 1,555
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/eval/CheckBNC.java
|
BNCTextFilter
|
filter
|
class BNCTextFilter {
public String filter(String text) {<FILL_FUNCTION_BODY>}
}
|
String fText = text.replaceAll("(?s)<header.*?>.*?</header>", "");
fText = fText.replaceAll("<w.*?>", "");
fText = fText.replaceAll("<c.*?>", "");
fText = fText.replaceAll("<.*?>", "");
fText = fText.replaceAll(" +", " ");
fText = fText.replaceAll("&bquo|&equo", "\"");
fText = fText.replaceAll("—?", "--");
fText = fText.replaceAll("&?", "&");
return fText;
| 35
| 170
| 205
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/eval/ConfusionPairEvaluator.java
|
ConfusionPairEvaluator
|
main
|
class ConfusionPairEvaluator {
private final static String encoding = "UTF-8";
static String[] words = new String[2];
static String[] ruleIds = new String[2];
static int[][] results = new int[2][4]; // word0/word1 ; TP/FP/TN/FN
static Language lang = null;
static JLanguageTool lt = null;
static List<String> classifyTypes = Arrays.asList("TP", "FP", "TN", "FN");
public static void main(String[] args) throws Exception {<FILL_FUNCTION_BODY>}
private static void analyzeSentence(String correctSentence, int j) throws IOException {
List<RuleMatch> matchesCorrect = lt.check(correctSentence);
if (containsID(matchesCorrect, ruleIds[j])) {
results[j][classifyTypes.indexOf("FP")]++;
System.out.println(ruleIds[j] + " FP: " + correctSentence);
} else {
results[j][classifyTypes.indexOf("TN")]++;
//System.out.println(ruleIds[j] + " TN: " + correctSentence);
}
String wrongSentence = correctSentence.replaceAll("\\b" + words[j] + "\\b", words[1 - j]);
List<RuleMatch> matchesWrong = lt.check(wrongSentence);
if (containsID(matchesWrong, ruleIds[1 - j])) {
results[1 - j][classifyTypes.indexOf("TP")]++;
//System.out.println(ruleIds[1 - j] + " TP: " + wrongSentence);
} else {
results[1 - j][classifyTypes.indexOf("FN")]++;
System.out.println(ruleIds[1 - j] + " FN: " + wrongSentence);
}
//FP+FN in the same sentence -> probable error in corpus
}
private static boolean containsID (List<RuleMatch> matches, String id) {
for (RuleMatch match : matches) {
if (match.getRule().getId().equals(id)) {
return true;
}
}
return false;
}
private int indexOfWord(String word, String sentence) {
Pattern p = Pattern.compile("\\b" + word + "\\b");
Matcher m = p.matcher(sentence);
if (m != null) {
return m.start();
}
return -1;
}
private static InputStreamReader getInputStreamReader(String filename, String encoding) throws IOException {
String charsetName = encoding != null ? encoding : Charset.defaultCharset().name();
InputStream is = System.in;
if (!isStdIn(filename)) {
is = new FileInputStream(new File(filename));
BOMInputStream bomIn = new BOMInputStream(is, true, ByteOrderMark.UTF_8, ByteOrderMark.UTF_16BE,
ByteOrderMark.UTF_16LE, ByteOrderMark.UTF_32BE, ByteOrderMark.UTF_32LE);
if (bomIn.hasBOM() && encoding == null) {
charsetName = bomIn.getBOMCharsetName();
}
is = bomIn;
}
return new InputStreamReader(new BufferedInputStream(is), charsetName);
}
private static boolean isStdIn(String filename) {
return "-".equals(filename);
}
private static void help() {
System.out.println("Usage: " + ConfusionPairEvaluator.class.getSimpleName()
+ " <language code> <intput file> word1 word2 ruleId1 ruleId2");
System.exit(1);
}
}
|
if (args.length != 6) {
help();
}
long start = System.currentTimeMillis();
lang = Languages.getLanguageForShortCode(args[0]);
lt = new JLanguageTool(lang);
String filename = args[1];
words[0] = args[2];
words[1] = args[3];
ruleIds[0] = args[4];
ruleIds[1] = args[5];
for (Rule rule : lt.getAllRules()) {
if (!rule.getId().equals(ruleIds[0]) && !rule.getId().equals(ruleIds[1])) {
lt.disableRule(rule.getId());
}
}
try (InputStreamReader isr = getInputStreamReader(filename, encoding);
BufferedReader br = new BufferedReader(isr)) {
String line;
while ((line = br.readLine()) != null) {
List<String> sentencesLine = lt.sentenceTokenize(line);
for (String sentence : sentencesLine) {
List<String> tokens = lang.getWordTokenizer().tokenize(sentence);
int count0=0;
int count1=0;
for (String token : tokens) {
if (token.equals(words[0])) {
count0++;
}
if (token.equals(words[1])) {
count1++;
}
}
if (count0>0 && count1>0) {
System.out.println("WARNING Sentence with the two words: " + sentence);
} else if (count0==1 && count1==0) {
analyzeSentence(sentence, 0);
} else if (count0==0 && count1==1) {
analyzeSentence(sentence, 1);
} else if (count0>1 || count1>1) {
System.out.println("WARNING Sentence with a repeated word: " + sentence);
}
}
}
}
for (int i=0; i<2; i++) {
System.out.println("Results for rule "+ruleIds[i]);
for (int j=0; j<4; j++) {
System.out.println(classifyTypes.get(j)+": "+results[i][j]);
}
float precision = results[i][classifyTypes.indexOf("TP")] / (float) (results[i][classifyTypes.indexOf("TP")] + results[i][classifyTypes.indexOf("FP")]);
float recall = results[i][classifyTypes.indexOf("TP")] / (float) (results[i][classifyTypes.indexOf("TP")] + results[i][classifyTypes.indexOf("FN")]);
System.out.println("Precision: " + String.format("%.4f", precision));
System.out.println("Recall: " + String.format("%.4f", recall));
}
float time = (float) ((System.currentTimeMillis() - start) / 1000.0);
System.out.println("Total time: " + String.format("%.2f", time) + " seconds");
| 963
| 785
| 1,748
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/eval/LanguageToolEvaluator.java
|
LanguageToolEvaluator
|
disableRules
|
class LanguageToolEvaluator implements Evaluator {
private final JLanguageTool lt;
private final LanguageModel languageModel;
LanguageToolEvaluator(File indexTopDir) throws IOException {
lt = new JLanguageTool(new BritishEnglish());
disableRules();
if (indexTopDir != null) {
if (indexTopDir.isDirectory()) {
languageModel = new LuceneLanguageModel(indexTopDir);
System.out.println("Using Lucene language model from " + languageModel);
EnglishConfusionProbabilityRule probabilityRule =
new EnglishConfusionProbabilityRule(JLanguageTool.getMessageBundle(), languageModel, new English());
//new EnglishConfusionProbabilityRule(JLanguageTool.getMessageBundle(), languageModel, new File("/tmp/languagetool_network.net"));
lt.addRule(probabilityRule);
} else {
throw new RuntimeException("Does not exist or not a directory: " + indexTopDir);
}
} else {
languageModel = null;
}
}
@Override
public void close() {
if (languageModel != null) {
languageModel.close();
}
}
private void disableRules() {<FILL_FUNCTION_BODY>}
@Override
public List<RuleMatch> check(AnnotatedText annotatedText) throws IOException {
return lt.check(annotatedText);
}
}
|
// The Pedler corpus has some real errors that have no error markup, so we disable
// some rules that typically match those:
lt.disableRule("COMMA_PARENTHESIS_WHITESPACE");
lt.disableRule("SENT_START_CONJUNCTIVE_LINKING_ADVERB_COMMA");
lt.disableRule("EN_QUOTES");
lt.disableRule("I_LOWERCASE");
//langTool.disableRule("MORFOLOGIK_RULE_EN_GB"); // disabling spell rule improves precision 0.77 -> 0.88 (as of 2014-07-18)
// turn off style rules:
lt.disableRule("LITTLE_BIT");
lt.disableRule("ALL_OF_THE");
lt.disableRule("SOME_OF_THE");
// British English vs. American English - not clear whether the corpus contains only BE:
lt.disableRule("EN_GB_SIMPLE_REPLACE");
lt.disableRule("APARTMENT-FLAT");
| 359
| 282
| 641
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/eval/MissingDiacriticsEvaluator.java
|
MissingDiacriticsEvaluator
|
analyzeSentence
|
class MissingDiacriticsEvaluator {
private final static String encoding = "UTF-8";
static String[] words = new String[2];
static String[] ruleIds = new String[2];
static int[][] results = new int[2][4]; // word0/word1 ; TP/FP/TN/FN
static Language lang = null;
static JLanguageTool lt = null;
static List<String> classifyTypes = Arrays.asList("TP", "FP", "TN", "FN");
public static void main(String[] args) throws Exception {
if (args.length != 3) {
help();
}
long start = System.currentTimeMillis();
lang = Languages.getLanguageForShortCode(args[0]);
lt = new JLanguageTool(lang);
String filename = args[1];
words[0] = args[2];
words[1] = StringTools.removeDiacritics(words[0]);
ruleIds[0] = "Rules " + words[0] + " -> " + words[1]; // remove diacritics rules
ruleIds[1] = "Rules " + words[1] + " -> " + words[0]; // missing diacritics rules
// for (Rule rule : lt.getAllRules()) {
// if (!rule.getId().equals(ruleIds[0]) && !rule.getId().equals(ruleIds[1])) {
// lt.disableRule(rule.getId());
// }
// }
try (InputStreamReader isr = getInputStreamReader(filename, encoding);
BufferedReader br = new BufferedReader(isr)) {
String line;
while ((line = br.readLine()) != null) {
List<String> sentencesLine = lt.sentenceTokenize(line);
for (String sentence : sentencesLine) {
List<String> tokens = lang.getWordTokenizer().tokenize(sentence);
int pos = 0;
for (String token : tokens) {
if (token.equalsIgnoreCase(words[0])) {
analyzeSentence(sentence, 0, pos);
}
if (token.equalsIgnoreCase(words[1])) {
analyzeSentence(sentence, 1, pos);
}
pos += token.length();
}
}
}
}
for (int i = 0; i < 2; i++) {
System.out.println("Results for: " + ruleIds[i]);
for (int j = 0; j < 4; j++) {
System.out.println(classifyTypes.get(j) + ": " + results[i][j]);
}
float precision = results[i][classifyTypes.indexOf("TP")]
/ (float) (results[i][classifyTypes.indexOf("TP")] + results[i][classifyTypes.indexOf("FP")]);
float recall = results[i][classifyTypes.indexOf("TP")]
/ (float) (results[i][classifyTypes.indexOf("TP")] + results[i][classifyTypes.indexOf("FN")]);
System.out.println("Precision: " + String.format("%.4f", precision));
System.out.println("Recall: " + String.format("%.4f", recall));
}
float time = (float) ((System.currentTimeMillis() - start) / 1000.0);
System.out.println("Total time: " + String.format("%.2f", time) + " seconds");
}
private static void analyzeSentence(String correctSentence, int j, int fromPos) throws IOException {<FILL_FUNCTION_BODY>}
private static boolean isThereErrorAtPos(List<RuleMatch> matches, int pos) {
for (RuleMatch match : matches) {
if (match.getFromPos() <= pos && match.getToPos() > pos) {
return true;
}
}
return false;
}
// private static boolean containsID (List<RuleMatch> matches, String id) {
// for (RuleMatch match : matches) {
// if (match.getRule().getId().equals(id)) {
// return true;
// }
// }
// return false;
// }
private static InputStreamReader getInputStreamReader(String filename, String encoding) throws IOException {
String charsetName = encoding != null ? encoding : Charset.defaultCharset().name();
InputStream is = System.in;
if (!isStdIn(filename)) {
is = new FileInputStream(new File(filename));
BOMInputStream bomIn = new BOMInputStream(is, true, ByteOrderMark.UTF_8, ByteOrderMark.UTF_16BE,
ByteOrderMark.UTF_16LE, ByteOrderMark.UTF_32BE, ByteOrderMark.UTF_32LE);
if (bomIn.hasBOM() && encoding == null) {
charsetName = bomIn.getBOMCharsetName();
}
is = bomIn;
}
return new InputStreamReader(new BufferedInputStream(is), charsetName);
}
private static boolean isStdIn(String filename) {
return "-".equals(filename);
}
private static void help() {
System.out.println("Usage: " + MissingDiacriticsEvaluator.class.getSimpleName()
+ " <language code> <corpus file> <word with diacritics>");
System.exit(1);
}
}
|
boolean isFP = false;
boolean isFN = false;
List<RuleMatch> matchesCorrect = lt.check(correctSentence);
if (isThereErrorAtPos(matchesCorrect, fromPos)) {
results[j][classifyTypes.indexOf("FP")]++;
// if (j==1) {
System.out.println(ruleIds[j] + " FP: " + correctSentence);
// }
isFP = true;
} else {
results[j][classifyTypes.indexOf("TN")]++;
// System.out.println(ruleIds[j] + " TN: " + correctSentence);
}
// String wrongSentence = correctSentence.replaceAll("\\b" + words[j] + "\\b",
// words[1 - j]);
String replaceWith = words[1 - j];
if (StringTools.isCapitalizedWord(words[j])) {
replaceWith = StringTools.uppercaseFirstChar(replaceWith);
}
if (StringTools.isAllUppercase(replaceWith)) {
replaceWith = replaceWith.toUpperCase();
}
String wrongSentence = correctSentence.substring(0, fromPos) + replaceWith
+ correctSentence.substring(fromPos + words[j].length(), correctSentence.length());
if (wrongSentence.equals(correctSentence)) {
System.out.println("Word cannot be replaced: " + wrongSentence);
return;
}
List<RuleMatch> matchesWrong = lt.check(wrongSentence);
if (isThereErrorAtPos(matchesWrong, fromPos)) {
results[1 - j][classifyTypes.indexOf("TP")]++;
// System.out.println(ruleIds[1 - j] + " TP: " + wrongSentence);
} else {
results[1 - j][classifyTypes.indexOf("FN")]++;
if (j == 0) {
System.out.println(ruleIds[1 - j] + " FN: " + wrongSentence);
}
isFN = true;
}
// FP+FN in the same sentence -> probable error in corpus
if (isFP && isFN) {
System.out.println("POSSIBLE ERROR IN CORPUS: " + correctSentence);
}
| 1,375
| 593
| 1,968
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/eval/RealWordCorpusEvaluator.java
|
RealWordCorpusEvaluator
|
checkLines
|
class RealWordCorpusEvaluator {
private final Evaluator evaluator;
private final List<String> badConfusionMatchWords = new ArrayList<>();
private int sentenceCount;
private int errorsInCorpusCount;
private int perfectMatches;
private int goodMatches;
private int matchCount;
private int perfectConfusionMatches;
private int goodConfusionMatches;
private int badConfusionMatches;
RealWordCorpusEvaluator(File indexDir) throws IOException {
evaluator = getEvaluator(indexDir);
}
@NotNull
protected Evaluator getEvaluator(File indexTopDir) throws IOException {
Evaluator checker = new LanguageToolEvaluator(indexTopDir);
// use this to run AtD as the backend, so results can easily be compared to LT:
//checker = new AtDEvalChecker("http://en.service.afterthedeadline.com/checkDocument?key=test&data=");
return checker;
}
@NotNull
protected ErrorCorpus getCorpus(File dir) throws IOException {
return new PedlerCorpus(dir);
}
void close() {
evaluator.close();
}
int getSentencesChecked() {
return sentenceCount;
}
int getErrorsChecked() {
return errorsInCorpusCount;
}
int getRealErrorsFound() {
return goodMatches;
}
int getRealErrorsFoundWithGoodSuggestion() {
return perfectMatches;
}
void run(File dir) throws IOException {
System.out.println("Output explanation:");
System.out.println(" [ ] = this is not an expected error");
System.out.println(" [+ ] = this is an expected error");
System.out.println(" [++] = this is an expected error and the first suggestion is correct");
System.out.println(" [//] = not counted because already matches by a different rule");
System.out.println();
ErrorCorpus corpus = getCorpus(dir);
checkLines(corpus);
printResults();
}
private void checkLines(ErrorCorpus corpus) throws IOException {<FILL_FUNCTION_BODY>}
private boolean isConfusionRule(RuleMatch match) {
return match.getRule().getId().equals("CONFUSION_RULE");
}
private void printResults() {
System.out.println();
System.out.println(sentenceCount + " lines checked with " + errorsInCorpusCount + " errors.");
System.out.println("Confusion rule matches: " + perfectConfusionMatches+ " perfect, "
+ goodConfusionMatches + " good, " + badConfusionMatches + " bad (" + badConfusionMatchWords + ")");
System.out.println("\nCounting matches, no matter whether the first suggestion is correct:");
System.out.print(" " + goodMatches + " out of " + matchCount + " matches are real errors");
float goodPrecision = (float)goodMatches / matchCount;
float goodRecall = (float)goodMatches / errorsInCorpusCount;
System.out.printf(" => %.2f precision, %.2f recall\n", goodPrecision, goodRecall);
System.out.printf(" => %.4f F(0.5) measure\n",
FMeasure.getWeightedFMeasure(goodPrecision, goodRecall));
System.out.println("\nCounting only matches with a perfect first suggestion:");
System.out.print(" " + perfectMatches + " out of " + matchCount + " matches are real errors");
float perfectPrecision = (float)perfectMatches / matchCount;
float perfectRecall = (float)perfectMatches / errorsInCorpusCount;
System.out.printf(" => %.2f precision, %.2f recall\n", perfectPrecision, perfectRecall);
System.out.printf(" => %.4f F(0.5) measure\n",
FMeasure.getWeightedFMeasure(perfectPrecision, perfectRecall));
}
private boolean errorAlreadyCounted(RuleMatch match, List<Span> detectedErrorPositions) {
for (Span span : detectedErrorPositions) {
Span matchSpan = new Span(match.getFromPos(), match.getToPos());
if (span.covers(matchSpan) || matchSpan.covers(span)) {
return true;
}
}
return false;
}
public static void main(String[] args) throws IOException {
if (args.length != 1 && args.length != 2) {
System.out.println("Usage: " + RealWordCorpusEvaluator.class.getSimpleName() + " <corpusDirectory> [languageModel]");
System.out.println(" [languageModel] is a Lucene index directory with ngram frequency information (optional)");
System.exit(1);
}
File languageModelTopDir = null;
if (args.length == 1) {
System.out.println("Running without language model");
} else {
languageModelTopDir = new File(args[1]);
System.out.println("Running with language model from " + languageModelTopDir);
}
RealWordCorpusEvaluator evaluator = new RealWordCorpusEvaluator(languageModelTopDir);
evaluator.run(new File(args[0]));
evaluator.close();
}
}
|
for (ErrorSentence sentence : corpus) {
List<RuleMatch> matches = evaluator.check(sentence.getAnnotatedText());
sentenceCount++;
errorsInCorpusCount += sentence.getErrors().size();
System.out.println(sentence.getMarkupText() + " => " + matches.size());
for (RuleMatch match : matches) {
int length = match.getToPos() - match.getFromPos();
System.out.println(StringUtils.repeat(" ", match.getFromPos()) + StringUtils.repeat("^", length));
}
List<Span> detectedErrorPositions = new ArrayList<>();
for (RuleMatch match : matches) {
boolean alreadyCounted = errorAlreadyCounted(match, detectedErrorPositions);
if (!alreadyCounted && sentence.hasErrorCoveredByMatchAndGoodFirstSuggestion(match)) {
//TODO: it depends on the order of matches whether [++] comes before [ +] (it should!)
goodMatches++;
perfectMatches++;
matchCount++;
if (isConfusionRule(match)) {
perfectConfusionMatches++;
}
System.out.println(" [++] " + match + ": " + match.getSuggestedReplacements());
} else if (!alreadyCounted && sentence.hasErrorCoveredByMatch(match)) {
//} else if (!alreadyCounted && sentence.hasErrorOverlappingWithMatch(match)) {
goodMatches++;
matchCount++;
if (isConfusionRule(match)) {
goodConfusionMatches++;
}
System.out.println(" [+ ] " + match + ": " + match.getSuggestedReplacements());
} else if (alreadyCounted) {
System.out.println(" [//] " + match + ": " + match.getSuggestedReplacements());
} else {
System.out.println(" [ ] " + match + ": " + match.getSuggestedReplacements());
matchCount++;
if (isConfusionRule(match)) {
badConfusionMatches++;
badConfusionMatchWords.add(sentence.getMarkupText().substring(match.getFromPos(), match.getToPos()));
}
}
detectedErrorPositions.add(new Span(match.getFromPos(), match.getToPos()));
}
}
| 1,407
| 612
| 2,019
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/eval/RealWordFalseAlarmEvaluator.java
|
RealWordFalseAlarmEvaluator
|
checkLines
|
class RealWordFalseAlarmEvaluator {
private static final boolean EVAL_MODE = true; // set to false to get data for homophones-info.txt
private static final int MAX_SENTENCES = 1000;
private static final int MAX_ERROR_DISPLAY = 50;
// the minimum number of sentences in homophones-info.txt, items with less sentences will be ignored (eval mode only):
private static final int MIN_SENTENCES = 0;
// maximum error rate of a homophone in homophones-info.txt, items with a larger error rate will be ignored (eval mode only):
private static final float MAX_ERROR_RATE = 10;
private final JLanguageTool lt;
private final ConfusionProbabilityRule confusionRule;
private final Map<String,List<ConfusionPair>> confusionPairs;
private final LanguageModel languageModel;
private int globalSentenceCount;
private int globalRuleMatches;
RealWordFalseAlarmEvaluator(File languageModelIndexDir) throws IOException {
Language lang = new AmericanEnglish();
try (InputStream inputStream = JLanguageTool.getDataBroker().getFromResourceDirAsStream("/en/confusion_sets.txt")) {
ConfusionSetLoader confusionSetLoader = new ConfusionSetLoader(lang);
confusionPairs = confusionSetLoader.loadConfusionPairs(inputStream);
}
lt = new JLanguageTool(new BritishEnglish());
List<Rule> rules = lt.getAllActiveRules();
for (Rule rule : rules) {
lt.disableRule(rule.getId());
}
languageModel = new LuceneLanguageModel(languageModelIndexDir);
confusionRule = new EnglishConfusionProbabilityRule(JLanguageTool.getMessageBundle(), languageModel, lang);
lt.addRule(confusionRule);
}
void close() {
if (languageModel != null) {
languageModel.close();
}
}
void run(File dir) throws IOException {
if (EVAL_MODE) {
System.out.println("Running in eval mode, no 'DATA' lines will be printed, only a subset of the homophones will be used.");
} else {
System.out.println("grep for '^DATA;' to get results in CVS format:");
System.out.println("DATA;word;sentence_count;errors_found;errors_percent");
}
File[] files = dir.listFiles();
//noinspection ConstantConditions
int fileCount = 1;
for (File file : files) {
if (!file.getName().endsWith(".txt")) {
System.out.println("Ignoring " + file + ", does not match *.txt");
continue;
}
try (FileInputStream fis = new FileInputStream(file)) {
System.out.println("===== Working on " + file.getName() + " (" + fileCount + "/" + files.length + ") =====");
checkLines(IOUtils.readLines(fis), file.getName().replace(".txt", ""));
fileCount++;
}
}
System.out.println("==============================");
System.out.println(globalSentenceCount + " sentences checked");
System.out.println(globalRuleMatches + " errors found");
float percentage = (float)globalRuleMatches/(float)globalSentenceCount*100;
System.out.printf("%.2f%% of sentences have a match\n", percentage);
}
private void checkLines(List<String> lines, String name) throws IOException {<FILL_FUNCTION_BODY>}
public static void main(String[] args) throws IOException {
if (args.length != 2) {
System.out.println("Usage: " + RealWordFalseAlarmEvaluator.class.getSimpleName() + " <languageModel> <sentenceDirectory>");
System.out.println(" <languageModel> is a Lucene index with ngram frequency information");
System.out.println(" <sentenceDirectory> is a directory with filenames like 'xx.txt' where 'xx' is the homophone");
System.exit(1);
}
RealWordFalseAlarmEvaluator evaluator = new RealWordFalseAlarmEvaluator(new File(args[0]));
File dir = new File(args[1]);
if (!dir.isDirectory()) {
throw new RuntimeException("Not a directory: " + dir);
}
evaluator.run(dir);
evaluator.close();
}
}
|
List<ConfusionPair> subConfusionPair = confusionPairs.get(name);
if (subConfusionPair == null) {
System.out.println("Skipping '" + name + "', homophone not loaded");
return;
}
if (subConfusionPair.size() > 1) {
System.err.println("WARN: will only use first confusion set of " + subConfusionPair.size() + ": " + subConfusionPair.get(0));
}
confusionRule.setConfusionPair(subConfusionPair.get(0));
int sentenceCount = 0;
int ruleMatches = 0;
for (String line : lines) {
List<RuleMatch> matches = lt.check(line);
sentenceCount++;
globalSentenceCount++;
if (matches.size() > 0) {
Set<String> suggestions = new HashSet<>();
for (RuleMatch match : matches) {
//System.out.println(" " + match + ": " + match.getSuggestedReplacements());
suggestions.addAll(match.getSuggestedReplacements());
ruleMatches++;
globalRuleMatches++;
}
if (ruleMatches <= MAX_ERROR_DISPLAY) {
System.out.println("[" + name + "] " + line + " => " + suggestions);
}
}
if (sentenceCount > MAX_SENTENCES) {
System.out.println("Max sentences (" + MAX_SENTENCES + ") reached, stopping");
break;
}
}
System.out.println(sentenceCount + " sentences checked");
System.out.println(ruleMatches + " errors found");
float percentage = (float)ruleMatches/(float)sentenceCount*100;
System.out.printf("%.2f%% of sentences have a match\n", percentage);
if (!EVAL_MODE) {
System.out.printf(Locale.ENGLISH, "DATA;%s;%d;%d;%.2f\n\n", name, sentenceCount, ruleMatches, percentage);
}
| 1,130
| 534
| 1,664
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/eval/SpellCheckEvaluation.java
|
SpellCheckEvaluation
|
checkFile
|
class SpellCheckEvaluation {
private static final int MAX_SUGGESTIONS = 5;
private void run(Language language, File file) throws IOException {
JLanguageTool lt = getLanguageToolForSpellCheck(language);
checkFile(file, lt);
}
private JLanguageTool getLanguageToolForSpellCheck(Language language) {
JLanguageTool lt = new JLanguageTool(language);
for (Rule rule : lt.getAllActiveRules()) {
if (!rule.isDictionaryBasedSpellingRule()) {
lt.disableRule(rule.getId());
}
}
return lt;
}
private void checkFile(File file, JLanguageTool lt) throws IOException {<FILL_FUNCTION_BODY>}
public static void main(String[] args) throws IOException {
if (args.length != 2) {
System.out.println("Usage: " + SpellCheckEvaluation.class.getSimpleName() + " <langCode> <textFile>");
System.exit(1);
}
SpellCheckEvaluation eval = new SpellCheckEvaluation();
eval.run(Languages.getLanguageForShortCode(args[0]), new File(args[1]));
}
}
|
try (
FileInputStream fis = new FileInputStream(file);
InputStreamReader reader = new InputStreamReader(fis, "utf-8");
BufferedReader br = new BufferedReader(reader)
) {
String line;
while ((line = br.readLine()) != null) {
List<RuleMatch> matches = lt.check(line);
for (RuleMatch match : matches) {
String covered = line.substring(match.getFromPos(), match.getToPos());
List<String> suggestions = match.getSuggestedReplacements();
List<String> limitedSuggestions = suggestions.subList(0, Math.min(MAX_SUGGESTIONS, suggestions.size()));
System.out.println(covered + ": " + limitedSuggestions);
}
}
}
| 320
| 209
| 529
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/eval/TatoebaEvaluator.java
|
TatoebaEvaluator
|
run
|
class TatoebaEvaluator {
private final static String template = "/home/dnaber/data/corpus/tatoeba/20191014/sentences-LANG-20191014-top1000.txt";
private void run() throws IOException {<FILL_FUNCTION_BODY>}
public static void main(String[] args) throws IOException {
new TatoebaEvaluator().run();
}
}
|
for (Language lang : Languages.get()) {
//if (!lang.getShortCode().equals("sk")) {
// continue;
//}
File file = new File(template.replaceFirst("LANG", lang.getShortCode()));
if (!file.exists() || file.length() == 0) {
System.err.println("File not found or empty, skipping: " + file);
continue;
}
SentenceSourceChecker.main(new String[]{"-l", lang.getShortCode(), "-f", file.getAbsolutePath()});
}
| 123
| 145
| 268
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/messagechecker/LTMessageChecker.java
|
LTMessageChecker
|
run
|
class LTMessageChecker {
private static final boolean SPELLCHECK_ONLY = false;
private static final List<String> ruleExceptions = Arrays.asList("DE_CASE", "UPPERCASE_SENTENCE_START");
public static void main(String[] args) throws Exception {
if (args.length != 1) {
System.out.println("Usage: " + LTMessageChecker.class.getSimpleName() + " <langCode> | ALL");
System.exit(1);
}
LTMessageChecker check = new LTMessageChecker();
long start = System.currentTimeMillis();
if (args[0].equalsIgnoreCase("all")) {
for (Language lang : Languages.get()) {
check.run(lang);
}
} else {
check.run(Languages.getLanguageForShortCode(args[0]));
}
float time = (float) ((System.currentTimeMillis() - start) / 1000.0);
print("Total checking time: " + String.format("%.2f", time) + " seconds");
}
private static void print(String s) {
System.out.println("LTM: " + s);
}
private void run(Language lang)
throws IOException, IllegalAccessException, IllegalArgumentException, InvocationTargetException {<FILL_FUNCTION_BODY>}
void checkText(String textToCheck, JLanguageTool lt, Language lang, Rule r, ContextTools contextTools,
boolean isCorrection) throws IOException {
if (!textToCheck.isEmpty()) {
List<RuleMatch> matches = lt.check(textToCheck);
if (matches.size() > 0) {
List<RuleMatch> matchesToShow = new ArrayList<>();
for (RuleMatch match : matches) {
String ruleId = match.getRule().getId();
// exceptions for corrections
if (isCorrection && ruleExceptions.contains(ruleId)) {
continue;
}
if (!ruleId.equals(r.getId())) {
matchesToShow.add(match);
}
}
if (matchesToShow.size() > 0) {
print("Source: " + r.getFullId());
for (RuleMatch match : matchesToShow) {
print(lang.toAdvancedTypography(match.getMessage()));
print(contextTools.getContext(match.getFromPos(), match.getToPos(), textToCheck));
print("");
}
}
}
}
}
}
|
long start = System.currentTimeMillis();
JLanguageTool lt = new JLanguageTool(lang);
ContextTools contextTools = new ContextTools();
contextTools.setErrorMarker("**", "**");
contextTools.setEscapeHtml(false);
print("Checking language: " + lang.getName() + " (" + lang.getShortCodeWithCountryAndVariant() + ")");
print("Version: " + JLanguageTool.VERSION + " (" + JLanguageTool.BUILD_DATE + ", " + JLanguageTool.GIT_SHORT_ID + ")");
if (SPELLCHECK_ONLY) {
int enabledRules = 0;
print("NOTE: Running spell check only");
for (Rule r : lt.getAllRules()) {
if (!r.isDictionaryBasedSpellingRule()) {
lt.disableRule(r.getId());
} else {
enabledRules++;
}
}
if (enabledRules == 0) {
System.out.println("Error: No rule found to enable. Make sure to use a language code like 'en-US' (not just 'en') that supports spell checking.");
System.exit(1);
}
}
for (Rule r : lt.getAllRules()) {
String message = "";
try {
Method m = r.getClass().getMethod("getMessage", null);
message = (String) m.invoke(r);
} catch (NoSuchMethodException e) {
// do nothing
}
String shortMessage = "";
try {
Method m = r.getClass().getMethod("getShortMessage", null);
shortMessage = (String) m.invoke(r);
} catch (NoSuchMethodException e) {
// do nothing
}
if (!message.isEmpty()) {
message = lang.toAdvancedTypography(message);
message = message.replaceAll("<suggestion>", lang.getOpeningDoubleQuote()).replaceAll("</suggestion>",
lang.getClosingDoubleQuote());
message = message.replaceAll("<[^>]+>", "");
}
String corrections = "";
for (IncorrectExample ie : r.getIncorrectExamples()) {
corrections = corrections + String.join("; ", ie.getCorrections()) + "; ";
}
// don't require upper case sentence start in description (?)
// Advanced typography in rule description is not used in production. Here is used to avoid too many positives.
String ruleDescription = lang.toAdvancedTypography(StringTools.uppercaseFirstChar(r.getDescription()));
String textToCheck = message + "\n\n" + shortMessage + "\n\n" + ruleDescription;
checkText(textToCheck, lt, lang, r, contextTools, false);
checkText(corrections, lt, lang, r, contextTools, true);
}
float time = (float) ((System.currentTimeMillis() - start) / 1000.0);
print("Checked " + lang.getName() + " (" + lang.getShortCodeWithCountryAndVariant() + ") in "
+ String.format("%.2f", time) + " seconds");
| 641
| 795
| 1,436
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/simulation/DocProvider.java
|
DocProvider
|
getDoc
|
class DocProvider {
private static final int MAX_VAL = 20_000;
private final List<String> docs;
private Random rnd;
DocProvider(List<String> docs) {
this.docs = docs;
reset();
}
void reset() {
rnd = new Random(120); // don't change without checking that request size distribution is still realistic
}
String getDoc() {<FILL_FUNCTION_BODY>}
int getWeightedRandomLength() {
int max = getRandomMaxLength();
int min = max == MAX_VAL ? 550 : max - 49;
// just assume uniform length distribution inside these ranges (not quite true)...
return min + this.rnd.nextInt(max - min);
}
private int getRandomMaxLength() {
double rnd = this.rnd.nextFloat() * 100;
// this leads to a distribution roughly as we see it in the production system:
float fix = 15.6f;
if (rnd < 32) {
return 49;
} else if (rnd < 50 + fix) {
return 99;
} else if (rnd < 60 + fix) {
return 149;
} else if (rnd < 67 + fix) {
return 199;
} else if (rnd < 72 + fix) {
return 249;
} else if (rnd < 75 + fix) {
return 299;
} else if (rnd < 78 + fix) {
return 349;
} else if (rnd < 80 + fix) {
return 399;
} else if (rnd < 82 + fix) {
return 449;
} else if (rnd < 83 + fix) {
return 499;
} else if (rnd < 84 + fix) {
return 549;
} else {
// not quite correct...
return MAX_VAL;
}
}
}
|
int len = getWeightedRandomLength();
synchronized (docs) {
StringBuilder appended = new StringBuilder();
int paraSize = 0;
while (appended.length() < len) {
if (docs.size() == 0) {
throw new RuntimeException("Not enough docs left to provide another document");
}
String doc = docs.get(0);
appended.append(doc).append(" ");
paraSize += doc.length();
if (paraSize > 250 && appended.toString().endsWith(". ")) {
appended.append(doc).append("\n\n");
paraSize = 0;
}
docs.remove(0);
}
return appended.substring(0, len);
}
| 545
| 194
| 739
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/wiktionary/HomophoneExtractor.java
|
HomophoneExtractor
|
run
|
class HomophoneExtractor {
private static final Pattern homophonePattern = Pattern.compile("\\{\\{homophones\\|(.*?)\\}\\}");
private void run(String filename) throws FileNotFoundException {<FILL_FUNCTION_BODY>}
public static void main(String[] args) throws FileNotFoundException {
if (args.length != 1) {
System.out.println("Usage: " + HomophoneExtractor.class.getSimpleName() + " <xmlFilename>");
System.out.println(" <xmlFilename> is an unpacked Wiktionary dump");
System.exit(1);
}
HomophoneExtractor extractor = new HomophoneExtractor();
extractor.run(args[0]);
}
}
|
try (Scanner scanner = new Scanner(new File(filename))) {
String title = "";
int lineCount = 0;
long startTime = System.currentTimeMillis();
while (scanner.hasNextLine()) {
String line = scanner.nextLine();
lineCount++;
if (line.contains("<title>") && line.contains("</title>")) {
title = line.substring(line.indexOf("<title>") + 7, line.indexOf("</title>"));
} else if (line.contains("lang=en")) {
Matcher m = homophonePattern.matcher(line);
if (m.find()) {
String homophonesData = m.group(1).replaceFirst("\\|?lang=en\\|?", "");
String[] homophones = homophonesData.split("\\|");
List<String> allHomophones = new ArrayList<>();
allHomophones.add(title);
allHomophones.addAll(Arrays.asList(homophones));
allHomophones.sort(null);
System.out.println(String.join(", ", allHomophones));
}
}
if (lineCount % 100_000 == 0) {
long endTime = System.currentTimeMillis();
System.err.println(lineCount + " (" + (endTime-startTime) + "ms)...");
startTime = System.currentTimeMillis();
}
}
}
| 198
| 379
| 577
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/dev/wordsimilarity/BaseKeyboardDistance.java
|
Position
|
getPosition
|
class Position {
int row;
int column;
Position(int row, int column) {
this.row = row;
this.column = column;
}
float distanceTo(Position other) {
return Math.abs(column - other.column) + Math.abs(row - other.row);
}
@Override
public String toString() {
return "Position{row=" + row + ", column=" + column + '}';
}
}
@Override
public float getDistance(char c1, char c2) {
Position p1 = getPosition(c1);
Position p2 = getPosition(c2);
return p1.distanceTo(p2);
}
private Position getPosition(char searchKey) {<FILL_FUNCTION_BODY>
|
char searchKeyLowerCase = Character.toLowerCase(searchKey);
int row = -1;
int column = -1;
int rowCount = 0;
int columnCount;
for (char[] rowKeys : getKeys()) {
columnCount = 0;
for (char c : rowKeys) {
if (c == searchKeyLowerCase) {
row = rowCount;
column = columnCount;
}
columnCount++;
}
rowCount++;
}
if (row == -1 || column == -1) {
throw new RuntimeException("Could not find '" + searchKey + "' on keyboard - only letters are supported");
}
return new Position(row, column);
| 208
| 180
| 388
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-dev/src/main/java/org/languagetool/rules/spelling/suggestions/SuggestionChangesTest.java
|
SuggestionTestThread
|
doWork
|
class SuggestionTestThread extends Thread {
private final Random sampler = new Random(0);
private final ConcurrentLinkedQueue<Pair<SuggestionTestResultData, String>> results;
private JLanguageTool standardLt;
private Rule standardRule;
private final Map<SuggestionChangesExperiment, Rule> rules;
private final BlockingQueue<SuggestionTestData> tasks;
@SuppressWarnings("AssignmentOrReturnOfFieldWithMutableType")
SuggestionTestThread(BlockingQueue<SuggestionTestData> tasks, ConcurrentLinkedQueue<Pair<SuggestionTestResultData, String>> results) {
rules = new HashMap<>();
this.tasks = tasks;
this.results = results;
}
@Override
public void run() {
Language lang = Languages.getLanguageForShortCode(SuggestionsChanges.getInstance().getConfig().language);
init(lang);
while (!isInterrupted()) {
try {
SuggestionTestData entry = tasks.poll(1L, TimeUnit.SECONDS);
if (entry == null) {
break;
} else {
doWork(entry);
}
} catch (InterruptedException | IOException e) {
throw new RuntimeException(e);
}
}
}
private void init(Language lang) {
Iterator<SuggestionChangesExperiment> iterator = SuggestionsChanges.getInstance().getExperiments().iterator();
// parameters for experiments are shared via Singleton, so initialization must block
synchronized (tasks) {
SuggestionsChanges.getInstance().setCurrentExperiment(null);
standardLt = new JLanguageTool(lang);
standardRule = standardLt.getAllRules().stream().filter(Rule::isDictionaryBasedSpellingRule)
.findFirst().orElse(null);
while (iterator.hasNext()) {
SuggestionChangesExperiment experiment = iterator.next();
SuggestionsChanges.getInstance().setCurrentExperiment(experiment);
JLanguageTool lt = new JLanguageTool(lang);
try {
lt.activateLanguageModelRules(new File(SuggestionsChanges.getInstance().getConfig().ngramLocation));
} catch (IOException e) {
throw new RuntimeException(e);
}
Rule spellerRule = lt.getAllRules().stream().filter(Rule::isDictionaryBasedSpellingRule)
.findFirst().orElse(null);
rules.put(experiment, spellerRule);
}
}
}
void doWork(SuggestionTestData entry) throws IOException, InterruptedException {<FILL_FUNCTION_BODY>}
}
|
AnalyzedSentence sentence = standardLt.getAnalyzedSentence(entry.getSentence());
if (entry.getDataset().enforceCorrect) {
AnalyzedSentence correction = standardLt.getAnalyzedSentence(entry.getCorrection());
RuleMatch[] correctionMatches = standardRule.match(correction);
if (correctionMatches.length != 0) {
String message = String.format("Error found in sentence '%s', ignoring because of 'enforceCorrect' flag.%n", entry.getCorrection());
results.add(Pair.of(new SuggestionTestResultData(entry, null), message));
return;
}
}
// needs to be here to make combined filtering + sampling more transparent
if (sampler.nextFloat() > entry.getDataset().sampleRate) {
return;
}
List<SuggestionChangesExperiment> experiments = SuggestionsChanges.getInstance().getExperiments();
int experimentId = 0;
StringBuilder message = new StringBuilder();
message.append(String.format("Checking candidates for correction '%s' -> '%s' in sentence '%s':%n",
entry.getCovered(), entry.getReplacement(), entry.getSentence()));
List<String> correct = new ArrayList<>();
List<RuleMatch> gatheredSuggestions = new ArrayList<>(experiments.size());
int textSize = sentence.getText().length();
for (SuggestionChangesExperiment experiment : experiments) {
experimentId++;
Rule spellerRule = rules.get(experiment);
if (spellerRule == null) {
continue;
}
long startTime = System.currentTimeMillis();
RuleMatch[] matches = spellerRule.match(sentence);
long computationTime = System.currentTimeMillis() - startTime;
for (RuleMatch match : matches) {
String matchedWord = sentence.getText().substring(match.getFromPos(), match.getToPos());
if (!matchedWord.equals(entry.getCovered())) {
//System.out.println("Other spelling error detected, ignoring: " + matchedWord + " / " + covered);
continue;
}
List<String> suggestions = match.getSuggestedReplacements();
gatheredSuggestions.add(match);
if (suggestions.isEmpty()) { // TODO should be tracked as well
continue;
}
int position = suggestions.indexOf(entry.getReplacement());
SuggestionsChanges.getInstance().trackExperimentResult(Pair.of(experiment, entry.getDataset()),
position, textSize, computationTime);
if (position == 0) {
correct.add(String.valueOf(experimentId));
}
message.append(String.format("Experiment #%d: %s -> accepted @ #%d%n", experimentId, suggestions, position));
}
}
message.append(String.format("Correct suggestions by experiments: %s%n", String.join(", ", correct)));
results.add(Pair.of(new SuggestionTestResultData(entry, gatheredSuggestions), message.toString()));
| 682
| 784
| 1,466
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-gui-commons/src/main/java/org/languagetool/gui/AboutDialog.java
|
AboutDialog
|
getMaintainers
|
class AboutDialog {
private final ResourceBundle messages;
private final Component parent;
public AboutDialog(ResourceBundle messages, Component parent) {
this.messages = messages;
this.parent = parent;
}
public void show() {
String aboutText = Tools.getLabel(messages.getString("guiMenuAbout"));
JTextPane aboutPane = new JTextPane();
aboutPane.setBackground(new Color(0, 0, 0, 0));
aboutPane.setBorder(BorderFactory.createEmptyBorder());
aboutPane.setContentType("text/html");
aboutPane.setEditable(false);
aboutPane.setOpaque(false);
aboutPane.setText(String.format("<html>"
+ "<p>LanguageTool %s (%s, %s)<br>"
+ "Copyright (C) 2005-2024 the LanguageTool community and Daniel Naber<br>"
+ "This software is licensed under the GNU Lesser General Public License.<br>"
+ "<a href=\"https://www.languagetool.org\">https://www.languagetool.org</a><br>"
+ "Java max/total/free memory: %sMB, %sMB, %sMB</p>"
+ "<p>Maintainers or former maintainers of the language modules -<br>"
+ "(*) means language is unmaintained in LanguageTool:</p><br>"
+ "</html>", JLanguageTool.VERSION,
JLanguageTool.BUILD_DATE,
JLanguageTool.GIT_SHORT_ID,
Runtime.getRuntime().maxMemory()/1024/1024,
Runtime.getRuntime().totalMemory()/1024/1024,
Runtime.getRuntime().freeMemory()/1024/1024));
Tools.addHyperlinkListener(aboutPane);
JTextPane maintainersPane = new JTextPane();
maintainersPane.setBackground(new Color(0, 0, 0, 0));
maintainersPane.setBorder(BorderFactory.createEmptyBorder());
maintainersPane.setContentType("text/html");
maintainersPane.setEditable(false);
maintainersPane.setOpaque(false);
maintainersPane.setText(getMaintainers());
int prefWidth = Math.max(520, maintainersPane.getPreferredSize().width);
int maxHeight = Toolkit.getDefaultToolkit().getScreenSize().height / 2;
maxHeight = Math.min(maintainersPane.getPreferredSize().height, maxHeight);
maintainersPane.setPreferredSize(new Dimension(prefWidth, maxHeight));
JScrollPane scrollPane = new JScrollPane(maintainersPane);
scrollPane.setBorder(BorderFactory.createEmptyBorder());
JPanel panel = new JPanel();
panel.setLayout(new BoxLayout(panel, BoxLayout.PAGE_AXIS));
panel.add(aboutPane);
panel.add(scrollPane);
JOptionPane.showMessageDialog(parent, panel,
aboutText, JOptionPane.INFORMATION_MESSAGE);
}
private String getMaintainers() {<FILL_FUNCTION_BODY>}
}
|
TreeMap<String, Language> list = new TreeMap<>();
for (Language lang : Languages.get()) {
if (!lang.isVariant()) {
if (lang.getMaintainers() != null) {
list.put(messages.getString(lang.getShortCode()), lang);
}
}
}
StringBuilder str = new StringBuilder();
str.append("<table border=0 cellspacing=0 cellpadding=0>");
for (Map.Entry<String, Language> entry : list.entrySet()) {
str.append("<tr valign=\"top\"><td>");
str.append(entry.getKey());
if (entry.getValue().getMaintainedState() == LanguageMaintainedState.LookingForNewMaintainer) {
str.append("(*)");
}
str.append(":</td>");
str.append("<td> </td>");
str.append("<td>");
int i = 0;
Contributor[] maintainers = list.get(entry.getKey()).getMaintainers();
if (maintainers != null) {
for (Contributor contributor : maintainers) {
if (i > 0) {
str.append(", ");
if (i % 3 == 0) {
str.append("<br>");
}
}
str.append(contributor.getName());
i++;
}
}
str.append("</td></tr>");
}
str.append("</table>");
return str.toString();
| 836
| 402
| 1,238
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-gui-commons/src/main/java/org/languagetool/gui/CategoryNode.java
|
CategoryNode
|
toString
|
class CategoryNode extends DefaultMutableTreeNode {
private final Category category;
private boolean enabled;
CategoryNode(Category category, boolean enabled) {
super(category);
this.category = category;
this.enabled = enabled;
}
Category getCategory() {
return category;
}
boolean isEnabled() {
return enabled;
}
void setEnabled(boolean enabled) {
this.enabled = enabled;
}
@Override
public String toString() {<FILL_FUNCTION_BODY>}
}
|
int children = this.getChildCount();
int selected = 0;
for (int i = 0; i < children; i++) {
RuleNode child = (RuleNode) this.getChildAt(i);
if (child.isEnabled()) {
selected++;
}
}
return String.format("%s (%d/%d)", category.getName(), selected, children);
| 145
| 100
| 245
|
<methods>public void <init>() ,public void <init>(java.lang.Object) ,public void <init>(java.lang.Object, boolean) ,public void add(javax.swing.tree.MutableTreeNode) ,public Enumeration<javax.swing.tree.TreeNode> breadthFirstEnumeration() ,public Enumeration<javax.swing.tree.TreeNode> children() ,public java.lang.Object clone() ,public Enumeration<javax.swing.tree.TreeNode> depthFirstEnumeration() ,public boolean getAllowsChildren() ,public javax.swing.tree.TreeNode getChildAfter(javax.swing.tree.TreeNode) ,public javax.swing.tree.TreeNode getChildAt(int) ,public javax.swing.tree.TreeNode getChildBefore(javax.swing.tree.TreeNode) ,public int getChildCount() ,public int getDepth() ,public javax.swing.tree.TreeNode getFirstChild() ,public javax.swing.tree.DefaultMutableTreeNode getFirstLeaf() ,public int getIndex(javax.swing.tree.TreeNode) ,public javax.swing.tree.TreeNode getLastChild() ,public javax.swing.tree.DefaultMutableTreeNode getLastLeaf() ,public int getLeafCount() ,public int getLevel() ,public javax.swing.tree.DefaultMutableTreeNode getNextLeaf() ,public javax.swing.tree.DefaultMutableTreeNode getNextNode() ,public javax.swing.tree.DefaultMutableTreeNode getNextSibling() ,public javax.swing.tree.TreeNode getParent() ,public javax.swing.tree.TreeNode[] getPath() ,public javax.swing.tree.DefaultMutableTreeNode getPreviousLeaf() ,public javax.swing.tree.DefaultMutableTreeNode getPreviousNode() ,public javax.swing.tree.DefaultMutableTreeNode getPreviousSibling() ,public javax.swing.tree.TreeNode getRoot() ,public javax.swing.tree.TreeNode getSharedAncestor(javax.swing.tree.DefaultMutableTreeNode) ,public int getSiblingCount() ,public java.lang.Object getUserObject() ,public java.lang.Object[] getUserObjectPath() ,public void insert(javax.swing.tree.MutableTreeNode, int) ,public boolean isLeaf() ,public boolean isNodeAncestor(javax.swing.tree.TreeNode) ,public boolean isNodeChild(javax.swing.tree.TreeNode) ,public boolean 
isNodeDescendant(javax.swing.tree.DefaultMutableTreeNode) ,public boolean isNodeRelated(javax.swing.tree.DefaultMutableTreeNode) ,public boolean isNodeSibling(javax.swing.tree.TreeNode) ,public boolean isRoot() ,public Enumeration<javax.swing.tree.TreeNode> pathFromAncestorEnumeration(javax.swing.tree.TreeNode) ,public Enumeration<javax.swing.tree.TreeNode> postorderEnumeration() ,public Enumeration<javax.swing.tree.TreeNode> preorderEnumeration() ,public void remove(int) ,public void remove(javax.swing.tree.MutableTreeNode) ,public void removeAllChildren() ,public void removeFromParent() ,public void setAllowsChildren(boolean) ,public void setParent(javax.swing.tree.MutableTreeNode) ,public void setUserObject(java.lang.Object) ,public java.lang.String toString() <variables>public static final Enumeration<javax.swing.tree.TreeNode> EMPTY_ENUMERATION,protected boolean allowsChildren,protected Vector<javax.swing.tree.TreeNode> children,protected javax.swing.tree.MutableTreeNode parent,private static final long serialVersionUID,protected transient java.lang.Object userObject
|
languagetool-org_languagetool
|
languagetool/languagetool-gui-commons/src/main/java/org/languagetool/gui/CheckBoxTreeCellRenderer.java
|
CheckBoxTreeCellRenderer
|
getTreeCellRendererComponent
|
class CheckBoxTreeCellRenderer extends JPanel implements TreeCellRenderer {
private final DefaultTreeCellRenderer renderer = new DefaultTreeCellRenderer();
private final JCheckBox checkBox = new JCheckBox();
private Component defaultComponent;
CheckBoxTreeCellRenderer() {
setLayout(new BorderLayout());
setOpaque(false);
checkBox.setOpaque(false);
renderer.setLeafIcon(null);
add(checkBox, BorderLayout.WEST);
}
@Override
public Component getTreeCellRendererComponent(JTree tree, Object value, boolean selected, boolean expanded, boolean leaf, int row, boolean hasFocus) {<FILL_FUNCTION_BODY>}
}
|
Component component = renderer.getTreeCellRendererComponent(tree, value, selected, expanded, leaf, row, hasFocus);
if (value instanceof CategoryNode) {
if (defaultComponent != null) {
remove(defaultComponent);
}
defaultComponent = component;
add(component, BorderLayout.CENTER);
CategoryNode node = (CategoryNode) value;
checkBox.setSelected(node.isEnabled());
return this;
}
if (value instanceof RuleNode) {
if (defaultComponent != null) {
remove(defaultComponent);
}
defaultComponent = component;
add(component, BorderLayout.CENTER);
RuleNode node = (RuleNode) value;
checkBox.setSelected(node.isEnabled());
return this;
}
return component;
| 180
| 210
| 390
|
<methods>public void <init>() ,public void <init>(java.awt.LayoutManager) ,public void <init>(boolean) ,public void <init>(java.awt.LayoutManager, boolean) ,public javax.accessibility.AccessibleContext getAccessibleContext() ,public javax.swing.plaf.PanelUI getUI() ,public java.lang.String getUIClassID() ,public void setUI(javax.swing.plaf.PanelUI) ,public void updateUI() <variables>private static final java.lang.String uiClassID
|
languagetool-org_languagetool
|
languagetool/languagetool-gui-commons/src/main/java/org/languagetool/gui/ConfigurationDialog.java
|
CategoryComparator
|
updateRulesTrees
|
class CategoryComparator implements Comparator<Rule> {
@Override
public int compare(Rule r1, Rule r2) {
boolean hasCat = r1.getCategory() != null && r2.getCategory() != null;
if (hasCat) {
int res = r1.getCategory().getName().compareTo(r2.getCategory().getName());
if (res == 0) {
return r1.getDescription() != null && r2.getDescription() != null ? r1.getDescription().compareToIgnoreCase(r2.getDescription()) : 0;
}
return res;
}
return r1.getDescription() != null && r2.getDescription() != null ? r1.getDescription().compareToIgnoreCase(r2.getDescription()) : 0;
}
}
/**
* Update display of rules tree
*/
private void updateRulesTrees(List<Rule> rules) {<FILL_FUNCTION_BODY>
|
String[] specialTabNames = config.getSpecialTabNames();
int numConfigTrees = 2 + specialTabNames.length;
for (int i = 0; i < numConfigTrees; i++) {
if(i == 0) {
rootNode[i] = createTree(rules, false, null, rootNode[i]); // grammar options
} else if(i == 1) {
rootNode[i] = createTree(rules, true, null, rootNode[i]); // Style options
} else {
rootNode[i] = createTree(rules, true, specialTabNames[i - 2], rootNode[i]); // Special tab options
}
configTree[i].setModel(getTreeModel(rootNode[i], rules));
}
| 251
| 195
| 446
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-gui-commons/src/main/java/org/languagetool/gui/TreeListener.java
|
TreeListener
|
handle
|
class TreeListener implements KeyListener, MouseListener, TreeWillExpandListener {
static void install(JTree tree) {
TreeListener listener = new TreeListener(tree);
tree.addMouseListener(listener);
tree.addKeyListener(listener);
tree.addTreeWillExpandListener(listener);
}
private static final Dimension checkBoxDimension = new JCheckBox().getPreferredSize();
private final JTree tree;
private TreeListener(JTree tree) {
this.tree = tree;
}
@Override
public void keyTyped(KeyEvent e) {
}
@Override
public void keyPressed(KeyEvent e) {
if (e.getKeyCode() == KeyEvent.VK_SPACE) {
TreePath[] paths = tree.getSelectionPaths();
if (paths != null) {
for (TreePath path : paths) {
handle(path);
}
}
}
}
@Override
public void keyReleased(KeyEvent e) {
}
@Override
public void mouseClicked(MouseEvent e) {
}
@Override
public void mousePressed(MouseEvent e) {
int x = e.getX();
int y = e.getY();
TreePath path = tree.getPathForLocation(x, y);
if (isOverCheckBox(x, y, path)) {
handle(path);
}
}
@Override
public void mouseReleased(MouseEvent e) {
}
@Override
public void mouseEntered(MouseEvent e) {
}
@Override
public void mouseExited(MouseEvent e) {
}
private void handle(TreePath path) {<FILL_FUNCTION_BODY>}
private boolean isOverCheckBox(int x, int y, TreePath path) {
if ((path == null) || (path.getPathCount() == 0)) {
return false;
}
if (!isValidNode(path.getLastPathComponent())) {
return false;
}
//checkbox is east
//int offset = tree.getPathBounds(path).x + tree.getPathBounds(path).width - checkBoxDimension.width;
//if (x < offset) {
//checkbox is west
int offset = tree.getPathBounds(path).x + checkBoxDimension.width;
if (x > offset) {
return false;
}
return true;
}
private boolean isValidNode(Object c) {
return ((c instanceof CategoryNode) || (c instanceof RuleNode));
}
@Override
public void treeWillExpand(TreeExpansionEvent e) throws ExpandVetoException {
Point cursorPosition = MouseInfo.getPointerInfo().getLocation();
Point treePosition = tree.getLocationOnScreen();
int x = (int) (cursorPosition.getX() - treePosition.getX());
int y = (int) (cursorPosition.getY() - treePosition.getY());
TreePath path = tree.getPathForLocation(x, y);
if (isOverCheckBox(x, y, path)) {
throw new ExpandVetoException(e);
}
}
@Override
public void treeWillCollapse(TreeExpansionEvent e) throws ExpandVetoException {
treeWillExpand(e);
}
}
|
if ((path != null) && (path.getPathCount() > 0)) {
if (path.getLastPathComponent() instanceof CategoryNode) {
DefaultTreeModel model = (DefaultTreeModel) tree.getModel();
CategoryNode node = (CategoryNode) path.getLastPathComponent();
node.setEnabled(!node.isEnabled());
model.nodeChanged(node);
for (int i = 0; i < node.getChildCount(); i++) {
RuleNode child = (RuleNode) node.getChildAt(i);
if (child.isEnabled() != node.isEnabled()) {
child.setEnabled(node.isEnabled());
model.nodeChanged(child);
}
}
}
if (path.getLastPathComponent() instanceof RuleNode) {
DefaultTreeModel model = (DefaultTreeModel) tree.getModel();
RuleNode node = (RuleNode) path.getLastPathComponent();
node.setEnabled(!node.isEnabled());
model.nodeChanged(node);
if (node.isEnabled()) {
CategoryNode parent = (CategoryNode) node.getParent();
parent.setEnabled(true);
}
model.nodeChanged(node.getParent());
}
}
| 862
| 311
| 1,173
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-http-client/src/main/java/org/languagetool/remote/CheckConfigurationBuilder.java
|
CheckConfigurationBuilder
|
build
|
class CheckConfigurationBuilder {
private final String langCode;
private String motherTongueLangCode;
private boolean autoDetectLanguage;
private boolean enabledOnly;
private List<String> enabledRuleIds = new ArrayList<>();
private List<String> disabledRuleIds = new ArrayList<>();
private String mode = null;
private String level = null;
private List<String> ruleValues = new ArrayList<>();
private String textSessionID = null;
private String username = null;
private String apiKey = null;
/**
* @param langCode a language code like {@code en} or {@code en-US}
*/
public CheckConfigurationBuilder(String langCode) {
this.langCode = Objects.requireNonNull(langCode);
}
/**
* A configuration that causes the server to automatically detected the text language.
* Note that this requires at least a few sentences of text to work reliably.
*/
public CheckConfigurationBuilder() {
this.langCode = null;
this.autoDetectLanguage = true;
}
public CheckConfiguration build() {<FILL_FUNCTION_BODY>}
public CheckConfigurationBuilder setMotherTongueLangCode(String motherTongueLangCode) {
this.motherTongueLangCode = motherTongueLangCode;
return this;
}
public CheckConfigurationBuilder enabledRuleIds(List<String> ruleIds) {
this.enabledRuleIds = Objects.requireNonNull(ruleIds);
return this;
}
public CheckConfigurationBuilder enabledRuleIds(String... ruleIds) {
return enabledRuleIds(Arrays.asList(ruleIds));
}
public CheckConfigurationBuilder enabledOnly() {
this.enabledOnly = true;
return this;
}
public CheckConfigurationBuilder disabledRuleIds(List<String> ruleIds) {
this.disabledRuleIds = Objects.requireNonNull(ruleIds);
return this;
}
public CheckConfigurationBuilder disabledRuleIds(String... ruleIds) {
return disabledRuleIds(Arrays.asList(ruleIds));
}
public CheckConfigurationBuilder mode(String mode) {
this.mode = mode;
return this;
}
public CheckConfigurationBuilder level(String level) {
this.level = level;
return this;
}
public CheckConfigurationBuilder ruleValues(List<String> ruleValues) {
this.ruleValues = Objects.requireNonNull(ruleValues);
return this;
}
public CheckConfigurationBuilder textSessionID(String textSessionID) {
this.textSessionID = textSessionID;
return this;
}
public CheckConfigurationBuilder username(String username) {
this.username = username;
return this;
}
public CheckConfigurationBuilder apiKey(String apiKey) {
this.apiKey = apiKey;
return this;
}
}
|
if (enabledOnly && enabledRuleIds.isEmpty()) {
throw new IllegalStateException("You cannot use 'enabledOnly' when you haven't set rule ids to be enabled");
}
return new CheckConfiguration(langCode, motherTongueLangCode, autoDetectLanguage, enabledRuleIds, enabledOnly,
disabledRuleIds, mode, level, ruleValues, textSessionID, username, apiKey);
| 737
| 101
| 838
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-http-client/src/main/java/org/languagetool/remote/CorrectionLoopFinderApi.java
|
CorrectionLoopFinderApi
|
main
|
class CorrectionLoopFinderApi {
private final static int linesToSkip = 0;
private static void run(Configuration cfg) throws IOException {
Scanner sc = new Scanner(cfg.inputFile);
int lineCount = 0;
while (sc.hasNextLine()) {
String line = sc.nextLine();
lineCount++;
if (lineCount < linesToSkip) {
if (lineCount % 1000 == 0) {
System.out.println(lineCount + " skipped ...");
}
continue;
}
List<RemoteRuleMatch> matches = null;
try {
matches = cfg.lt.check(line, cfg.ltConfig, cfg.customParams).getMatches();
} catch (RuntimeException e) {
System.err.println("An exception occurred: " + e.getMessage());
}
if (matches != null) {
for (RemoteRuleMatch match : matches) {
int suggCount = 0;
for (String repl : match.getReplacements().get()) {
if (++suggCount > 5) {
break;
}
String corr = new StringBuilder(line).replace(match.getErrorOffset(), match.getErrorOffset()+match.getErrorLength(), repl).toString();
//System.out.println(line + " => " + corr);
List<RemoteRuleMatch> corrMatches = null;
try {
corrMatches = cfg.lt.check(corr, cfg.ltConfig, cfg.customParams).getMatches();
} catch (RuntimeException e) {
System.err.println("An exception occurred: " + e.getMessage());
}
if (corrMatches == null) {
continue;
}
for (RemoteRuleMatch corrMatch : corrMatches) {
for (String repl2 : corrMatch.getReplacements().get()) {
String corr2 = new StringBuilder(corr).replace(corrMatch.getErrorOffset(), corrMatch.getErrorOffset()+corrMatch.getErrorLength(), repl2).toString();
if (corr2.equals(line)) {
cfg.out.write("LOOP by " + getFullId(match) + "/" + getFullId(corrMatch) + ": " +
line.substring(match.getErrorOffset(), match.getErrorOffset()+match.getErrorLength()) + " => " + repl + "\n");
cfg.out.write(" " + line + "\n");
cfg.out.write(" " + corr + "\n");
}
}
}
}
}
cfg.out.flush();
}
if (lineCount % 1000 == 0) {
System.out.println(lineCount + "...");
}
}
}
public static void main(String[] args) throws IOException {<FILL_FUNCTION_BODY>}
static private class Configuration {
String remoteServer;
String userName;
String annotatorName;
String apiKey;
String inputFilePath;
String outputFilePath;
String languageCode;
File inputFile;
File outputFile;
boolean automaticAnnotation;
CheckConfiguration ltConfig;
RemoteLanguageTool lt;
Map<String, String> customParams = new HashMap<>();
FileWriter out;
StringBuilder outStrB;
String ansiDefault = "";
String ansiHighlight = "";
List<String> enabledOnlyRules = new ArrayList<String>();
List<String> disabledRules = new ArrayList<String>();
void prepareConfiguration() throws IOException {
CheckConfigurationBuilder cfgBuilder = new CheckConfigurationBuilder(languageCode);
// cfgBuilder.textSessionID("-2");
if (enabledOnlyRules.isEmpty()) {
cfgBuilder.disabledRuleIds("WHITESPACE_RULE");
cfgBuilder.disabledRuleIds("UNPAIRED_BRACKETS");
if (!disabledRules.isEmpty()) {
cfgBuilder.disabledRuleIds(disabledRules);
}
} else {
cfgBuilder.enabledRuleIds(enabledOnlyRules).enabledOnly();
}
if (!userName.isEmpty() && !apiKey.isEmpty()) {
cfgBuilder.username(userName).apiKey(apiKey).build();
}
ltConfig = cfgBuilder.build();
inputFile = new File(inputFilePath);
if (!inputFile.exists() || inputFile.isDirectory()) {
throw new IOException("File not found: " + inputFile);
}
String fileName = inputFile.getName();
// System.out.println("Analyzing file: " + fileName);
fileName = fileName.substring(0, fileName.lastIndexOf('.'));
if (outputFilePath.isEmpty()) {
outputFile = new File(inputFile.getParentFile() + "/" + fileName + "-loops.txt");
} else {
outputFile = new File(outputFilePath);
}
outStrB = new StringBuilder();
out = new FileWriter(outputFile, true);
lt = new RemoteLanguageTool(Tools.getUrl(remoteServer));
}
}
static private String getFullId(RemoteRuleMatch match) {
String ruleId = "";
if (match != null) {
String subId = null;
try {
subId = match.getRuleSubId().get();
} catch (NoSuchElementException e) {
}
if (subId != null) {
ruleId = match.getRuleId() + "[" + subId + "]";
} else {
ruleId = match.getRuleId();
}
}
return ruleId;
}
}
|
if (args.length != 1) {
System.out.println("Usage: " + CorrectionLoopFinderApi.class.getSimpleName() + " <configFile>");
System.exit(1);
}
String configurationFilename = args[0];
Properties prop = new Properties();
FileInputStream fis = new FileInputStream(configurationFilename);
prop.load(new InputStreamReader(fis, Charset.forName("UTF-8")));
Configuration cfg = new Configuration();
cfg.remoteServer = prop.getProperty("remoteServer", "http://localhost:8081").trim();
cfg.userName = prop.getProperty("userName", "").trim();
cfg.annotatorName = prop.getProperty("annotatorName", "").trim();
cfg.apiKey = prop.getProperty("apiKey", "").trim();
cfg.inputFilePath = prop.getProperty("inputFile", "").trim();
cfg.outputFilePath = prop.getProperty("outputFile", "").trim();
cfg.languageCode = prop.getProperty("languageCode").trim();
String customParamsStr = prop.getProperty("customParams", "").trim();
if (!customParamsStr.isEmpty()) {
for (String customParam : customParamsStr.split(";")) {
String[] parts = customParam.split(",");
cfg.customParams.put(parts[0], parts[1]);
}
}
String enabledOnlyRulesStr = prop.getProperty("enabledOnlyRules", "").trim();
if (!enabledOnlyRulesStr.isEmpty()) {
cfg.enabledOnlyRules = Arrays.asList(enabledOnlyRulesStr.split(","));
}
String disabledRulesStr = prop.getProperty("disabledRules", "").trim();
if (!disabledRulesStr.isEmpty()) {
cfg.disabledRules = Arrays.asList(disabledRulesStr.split(","));
}
// defaultColor="\u001B[0m"
// highlightColor="\u001B[97m"
cfg.ansiDefault = prop.getProperty("defaultColor", "").trim().replaceAll("\"", "");
cfg.ansiHighlight = prop.getProperty("highlightColor", "").trim().replaceAll("\"", "");
cfg.prepareConfiguration();
run(cfg);
| 1,446
| 592
| 2,038
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-http-client/src/main/java/org/languagetool/remote/RemoteRuleMatch.java
|
RemoteRuleMatch
|
toString
|
class RemoteRuleMatch {
private final String ruleId;
private final String ruleDescription;
private final String msg;
private final String context;
private final int contextOffset;
private final int offset;
private final int errorLength;
private String subId;
private String shortMsg;
private List<String> replacements;
private String url;
private String category;
private String categoryId;
private String locQualityIssueType;
RemoteRuleMatch(String ruleId, String ruleDescription, String msg, String context, int contextOffset, int offset, int errorLength) {
this.ruleId = Objects.requireNonNull(ruleId);
this.ruleDescription = ruleDescription;
this.msg = Objects.requireNonNull(msg);
this.context = Objects.requireNonNull(context);
this.contextOffset = contextOffset;
this.offset = offset;
this.errorLength = errorLength;
}
/** Unique (per language) identifier for the error. */
public String getRuleId() {
return ruleId;
}
/** Description of the rule. */
public String getRuleDescription() {
return ruleDescription;
}
/** Optional sub id (rule groups have a sub id for each rule). */
public Optional<String> getRuleSubId() {
return Optional.ofNullable(subId);
}
/** A text describing the error to the user. */
public String getMessage() {
return msg;
}
/** Optional short message describing the error. */
public Optional<String> getShortMessage() {
return Optional.ofNullable(shortMsg);
}
/**
* Potential corrections for the error. Note that corrections might be wrong and
* they are not necessarily ordered by quality.
*/
public Optional<List<String>> getReplacements() {
return Optional.ofNullable(replacements);
}
/** The error in its context. See {@link #getContextOffset()} and {@link #getErrorLength()} to get the exact position. */
public String getContext() {
return context;
}
/** The character position of the error start inside the result of {@link #getContext()}. */
public int getContextOffset() {
return contextOffset;
}
/** The character position where the error starts. */
public int getErrorOffset() {
return offset;
}
/** The length of the error in characters. */
public int getErrorLength() {
return errorLength;
}
/** URL with a more detailed explanation of the error. */
public Optional<String> getUrl() {
return Optional.ofNullable(url);
}
/** The error's category. */
public Optional<String> getCategory() {
return Optional.of(category);
}
/** The id of the error's category. */
public Optional<String> getCategoryId() {
return Optional.of(categoryId);
}
public Optional<String> getLocQualityIssueType() {
return Optional.ofNullable(locQualityIssueType);
}
//
// non-public setters
//
void setRuleSubId(String subId) {
this.subId = subId;
}
void setShortMsg(String shortMsg) {
this.shortMsg = shortMsg;
}
void setReplacements(List<String> replacements) {
this.replacements = Collections.unmodifiableList(replacements);
}
void setUrl(String url) {
this.url = url;
}
void setCategory(String category) {
this.category = category;
}
void setCategoryId(String categoryId) {
this.categoryId = categoryId;
}
void setLocQualityIssueType(String locQualityIssueType) {
this.locQualityIssueType = locQualityIssueType;
}
@Override
public String toString() {<FILL_FUNCTION_BODY>}
}
|
return ruleId + "@" + offset + "-" + (offset + errorLength);
| 1,035
| 24
| 1,059
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-http-client/src/main/java/org/languagetool/remote/RemoteServer.java
|
RemoteServer
|
toString
|
class RemoteServer {
private final String software;
private final String version;
private final String buildDate;
RemoteServer(String software, String version, String buildDate) {
this.software = Objects.requireNonNull(software);
this.version = Objects.requireNonNull(version);
this.buildDate = buildDate;
}
/**
* @return the software running on the server, usually {@code LanguageTool}
*/
public String getSoftware() {
return software;
}
/**
* @return the version running on the server, might be something like {@code 3.4-SNAPSHOT} or {@code 3.4}
*/
public String getVersion() {
return version;
}
/**
* @return the build date of the version or null (in case this isn't a real build but runs in an IDE etc.)
*/
public Optional<String> getBuildDate() {
return Optional.ofNullable(buildDate);
}
@Override
public String toString() {<FILL_FUNCTION_BODY>}
}
|
return software + "/" + version + "/" + buildDate;
| 282
| 19
| 301
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-http-client/src/main/java/org/languagetool/remote/multiLang/InjectedSentence.java
|
InjectedSentence
|
equals
|
class InjectedSentence {
private final String language;
private final String text;
public InjectedSentence(String language, String text) {
this.language = language;
this.text = text;
}
public String getLanguage() {
return language;
}
public String getText() {
return text.trim();
}
@Override
public String toString() {
return "Sentence: " +
"language='" + language + '\'' +
", text='" + text + '\'';
}
@Override
public boolean equals(Object o) {<FILL_FUNCTION_BODY>}
@Override
public int hashCode() {
int result = getLanguage().hashCode();
result = 31 * result + getText().hashCode();
return result;
}
}
|
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
InjectedSentence that = (InjectedSentence) o;
if (!getLanguage().equals(that.getLanguage())) return false;
if (!getText().equals(that.getText())) return false;
return true;
| 219
| 91
| 310
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-http-client/src/main/java/org/languagetool/remote/multiLang/MultiLangCorpora.java
|
MultiLangCorpora
|
injectOtherSentence
|
class MultiLangCorpora {
private String language;
private String text = "";
private List<InjectedSentence> injectedSentences = new ArrayList<>();
private int sentencesInText;
public MultiLangCorpora(String language) {
this.language = language;
}
public String getLanguage() {
return language;
}
public String getText() {
return text.trim();
}
public List<InjectedSentence> getInjectedSentences() {
return injectedSentences;
}
public void injectOtherSentence(String injectLanguage, String sentence) {<FILL_FUNCTION_BODY>}
public void addSentence(String sentence) {
//System.out.println("Add mainLangText: " + sentence);
this.text += " " + sentence;
this.sentencesInText++;
}
public int getSentencesInText() {
return sentencesInText;
}
}
|
//System.out.println("Add otherLangText: " + sentence);
this.text += " " + sentence;
this.injectedSentences.add(new InjectedSentence(injectLanguage, sentence));
this.sentencesInText++;
| 252
| 66
| 318
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-http-client/src/main/java/org/languagetool/remote/multiLang/MultiLanguageTextCheckEval.java
|
DetectionResults
|
runTest
|
class DetectionResults {
private float time;
private float timeWithout;
private float detectionRate;
private int wrongDetected;
private long charsInText;
private int sentencesInText;
private int injectedSentecesInText;
DetectionResults(float time, float timeWithout, float detectionRate, int wrongDetected, long charsInText, int sentencesInText, int injectedSentecesInText) {
this.time = time;
this.timeWithout = timeWithout;
this.detectionRate = detectionRate;
this.wrongDetected = wrongDetected;
this.charsInText = charsInText;
this.sentencesInText = sentencesInText;
this.injectedSentecesInText = injectedSentecesInText;
}
public float getTimeDiff() {
return time - timeWithout;
}
}
private static DetectionResults runTest(MultiLangCorpora mlc) {
return runTest(mlc, "http://localhost:8081");
}
private static DetectionResults runTest(MultiLangCorpora mlc, String languageToolServer) {<FILL_FUNCTION_BODY>
|
RemoteLanguageTool remoteLanguageTool = new RemoteLanguageTool(Tools.getUrl(languageToolServer));
String language = useLangDetectionService ? "auto" : getSupportedLangCode(mlc.getLanguage());
long startTime = System.currentTimeMillis();
RemoteResult results = null;
try {
Map<String, String> params = new HashMap<>();
params.put("enableMultiLanguageChecks", "true");
params.put("preferredLanguages", "de,en");
results = remoteLanguageTool.check(mlc.getText(), language, params);
} catch (RuntimeException ex) {
if (spamToMe) {
System.out.println("too many errors");
}
return null;
}
long endTime = System.currentTimeMillis();
float timeToCheck = (endTime - startTime) / 1000f;
//2nd check without multilanguage
long startTimeRound2 = System.currentTimeMillis();
try {
Map<String, String> params = new HashMap<>();
params.put("enableMultiLanguageChecks", "false");
params.put("preferredLanguages", "de");
remoteLanguageTool.check(mlc.getText(), language, params);
} catch (RuntimeException ex) {
if (spamToMe) {
System.out.println("too many errors");
}
return null;
}
long endTimeRound2 = System.currentTimeMillis();
float timeToCheckRound2 = (endTimeRound2 - startTimeRound2) / 1000f;
//detected sentences by lt
List<String> detectedSentences = new ArrayList<>();
for (RemoteIgnoreRange range : results.getIgnoreRanges()) {
detectedSentences.add(mlc.getText().substring(range.getFrom(), range.getTo()).trim());
}
//injected lines by checker
List<String> injectedLines = mlc.getInjectedSentences().stream().map(InjectedSentence::getText).collect(Collectors.toList());
//later cleaned by not detectedLines
List<String> detectedLines = new ArrayList<>(injectedLines);
//assume everything is not detected
List<String> notDetectedLines = new ArrayList<>(injectedLines);
//assume everything is wrong detected
List<String> wrongDetectedSentences = new ArrayList<>(detectedSentences);
//filter all correct detected sentences from wrong detected sentences.
List<String> notWrongDetected = new ArrayList<>();
List<String> tmpRemoveFromWrongDetectedSentences = new ArrayList<>();
for (String sentence : wrongDetectedSentences) {
for (String injectedLine : injectedLines) {
if (injectedLine.contains(sentence)) { //If a line in the corpora has more than one sentence, but the range is per sentence
notWrongDetected.add(injectedLine);
tmpRemoveFromWrongDetectedSentences.add(sentence);
}
}
}
wrongDetectedSentences.removeAll(tmpRemoveFromWrongDetectedSentences);
notDetectedLines.removeAll(notWrongDetected);
detectedLines.removeAll(notDetectedLines);
allWrongRanges.addAll(wrongDetectedSentences);
allNotDetected.addAll(notDetectedLines);
if (rounds == 1) {
System.out.println(mlc.getText());
}
return new DetectionResults(timeToCheck, timeToCheckRound2, ((float) detectedLines.size() / (float) injectedLines.size()) * 100, wrongDetectedSentences.size(), mlc.getText().length(), mlc.getSentencesInText(), mlc.getInjectedSentences().size());
| 305
| 964
| 1,269
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/ar/src/main/java/org/languagetool/language/Arabic.java
|
Arabic
|
getRelevantRules
|
class Arabic extends Language implements AutoCloseable {
private LanguageModel languageModel;
@Override
public String getName() {
return "Arabic";
}
@Override
public String getShortCode() {
return "ar";
}
@Override
public String[] getCountries() {
return new String[]{"", "SA", "DZ", "BH", "EG", "IQ", "JO", "KW", "LB", "LY", "MA", "OM", "QA", "SD", "SY", "TN", "AE", "YE"};
}
@Override
public Disambiguator createDefaultDisambiguator() {
return new ArabicHybridDisambiguator();
}
@Override
public SentenceTokenizer createDefaultSentenceTokenizer() {
return new SRXSentenceTokenizer(this);
}
@Override
public Tokenizer createDefaultWordTokenizer() {
return new ArabicWordTokenizer();
}
@NotNull
@Override
public Tagger createDefaultTagger() {
return new ArabicTagger();
}
@Override
public Synthesizer createDefaultSynthesizer() {
return ArabicSynthesizer.INSTANCE;
}
@Override
public Contributor[] getMaintainers() {
return new Contributor[]{
new Contributor("Taha Zerrouki"),
new Contributor("Sohaib Afifi")
};
}
@Override
public List<Rule> getRelevantRules(ResourceBundle messages, UserConfig userConfig, Language motherTongue, List<Language> altLanguages) throws IOException {<FILL_FUNCTION_BODY>}
@Override
public List<Rule> getRelevantLanguageModelRules(ResourceBundle messages, LanguageModel languageModel, UserConfig userConfig) {
return Arrays.asList(
new ArabicConfusionProbabilityRule(messages, languageModel, this)
);
}
@Override
public LanguageMaintainedState getMaintainedState() {
return LanguageMaintainedState.ActivelyMaintained;
}
@Override
public synchronized LanguageModel getLanguageModel(File indexDir) {
languageModel = initLanguageModel(indexDir, languageModel);
return languageModel;
}
@Override
public void close() {
if (languageModel != null) {
languageModel.close();
}
}
@Nullable
@Override
protected SpellingCheckRule createDefaultSpellingRule(ResourceBundle messages) throws IOException {
return new ArabicHunspellSpellerRule(messages);
}
}
|
return Arrays.asList(
new MultipleWhitespaceRule(messages, this),
new SentenceWhitespaceRule(messages),
new GenericUnpairedBracketsRule(messages,
Arrays.asList("[", "(", "{", "«", "﴾", "\"", "'"),
Arrays.asList("]", ")", "}", "»", "﴿", "\"", "'")),
new CommaWhitespaceRule(messages, true),
new LongSentenceRule(messages, userConfig, 50),
// specific to Arabic :
new ArabicHunspellSpellerRule(messages, userConfig),
new ArabicCommaWhitespaceRule(messages),
new ArabicQuestionMarkWhitespaceRule(messages),
new ArabicSemiColonWhitespaceRule(messages),
new ArabicDoublePunctuationRule(messages),
new ArabicWordRepeatRule(messages),
new ArabicSimpleReplaceRule(messages),
new ArabicDiacriticsRule(messages),
new ArabicDarjaRule(messages),
new ArabicHomophonesRule(messages),
new ArabicRedundancyRule(messages),
new ArabicWordCoherencyRule(messages),
new ArabicWordinessRule(messages),
new ArabicWrongWordInContextRule(messages, this),
new ArabicTransVerbRule(messages),
new ArabicInflectedOneWordReplaceRule(messages)
);
| 677
| 366
| 1,043
|
<methods>public java.lang.String adaptSuggestion(java.lang.String) ,public List<org.languagetool.rules.RuleMatch> adaptSuggestions(List<org.languagetool.rules.RuleMatch>, Set<java.lang.String>) ,public org.languagetool.rules.RuleMatch adjustMatch(org.languagetool.rules.RuleMatch, List<java.lang.String>) ,public org.languagetool.chunking.Chunker createDefaultChunker() ,public org.languagetool.tagging.disambiguation.Disambiguator createDefaultDisambiguator() ,public org.languagetool.JLanguageTool createDefaultJLanguageTool() ,public org.languagetool.chunking.Chunker createDefaultPostDisambiguationChunker() ,public org.languagetool.tokenizers.SentenceTokenizer createDefaultSentenceTokenizer() ,public org.languagetool.synthesis.Synthesizer createDefaultSynthesizer() ,public org.languagetool.tagging.Tagger createDefaultTagger() ,public org.languagetool.tokenizers.Tokenizer createDefaultWordTokenizer() ,public boolean equals(java.lang.Object) ,public boolean equalsConsiderVariantsIfSpecified(org.languagetool.Language) ,public synchronized org.languagetool.chunking.Chunker getChunker() ,public java.lang.String getClosingDoubleQuote() ,public java.lang.String getClosingSingleQuote() ,public java.lang.String getCommonWordsPath() ,public java.lang.String getConsistencyRulePrefix() ,public abstract java.lang.String[] getCountries() ,public List<java.lang.String> getDefaultDisabledRulesForVariant() ,public List<java.lang.String> getDefaultEnabledRulesForVariant() ,public org.languagetool.Language getDefaultLanguageVariant() ,public org.languagetool.rules.spelling.SpellingCheckRule getDefaultSpellingRule() ,public org.languagetool.rules.spelling.SpellingCheckRule getDefaultSpellingRule(java.util.ResourceBundle) ,public org.languagetool.rules.patterns.Unifier getDisambiguationUnifier() ,public org.languagetool.rules.patterns.UnifierConfiguration getDisambiguationUnifierConfiguration() ,public synchronized org.languagetool.tagging.disambiguation.Disambiguator getDisambiguator() 
,public java.util.regex.Pattern getIgnoredCharactersRegex() ,public org.languagetool.languagemodel.LanguageModel getLanguageModel(java.io.File) throws java.io.IOException,public java.util.Locale getLocale() ,public java.util.Locale getLocaleWithCountryAndVariant() ,public org.languagetool.LanguageMaintainedState getMaintainedState() ,public abstract org.languagetool.language.Contributor[] getMaintainers() ,public org.languagetool.rules.spelling.multitoken.MultitokenSpeller getMultitokenSpeller() ,public abstract java.lang.String getName() ,public java.lang.String getOpeningDoubleQuote() ,public java.lang.String getOpeningSingleQuote() ,public synchronized org.languagetool.chunking.Chunker getPostDisambiguationChunker() ,public Map<java.lang.String,java.lang.Integer> getPriorityMap() ,public List<org.languagetool.rules.Rule> getRelevantLanguageModelCapableRules(java.util.ResourceBundle, org.languagetool.languagemodel.LanguageModel, org.languagetool.GlobalConfig, org.languagetool.UserConfig, org.languagetool.Language, List<org.languagetool.Language>) throws java.io.IOException,public List<org.languagetool.rules.Rule> getRelevantLanguageModelRules(java.util.ResourceBundle, org.languagetool.languagemodel.LanguageModel, org.languagetool.UserConfig) throws java.io.IOException,public List<org.languagetool.rules.Rule> getRelevantRemoteRules(java.util.ResourceBundle, List<org.languagetool.rules.RemoteRuleConfig>, org.languagetool.GlobalConfig, org.languagetool.UserConfig, org.languagetool.Language, List<org.languagetool.Language>, boolean) throws java.io.IOException,public abstract List<org.languagetool.rules.Rule> getRelevantRules(java.util.ResourceBundle, org.languagetool.UserConfig, org.languagetool.Language, List<org.languagetool.Language>) throws java.io.IOException,public List<org.languagetool.rules.Rule> getRelevantRulesGlobalConfig(java.util.ResourceBundle, org.languagetool.GlobalConfig, org.languagetool.UserConfig, org.languagetool.Language, 
List<org.languagetool.Language>) throws java.io.IOException,public Function<org.languagetool.rules.Rule,org.languagetool.rules.Rule> getRemoteEnhancedRules(java.util.ResourceBundle, List<org.languagetool.rules.RemoteRuleConfig>, org.languagetool.UserConfig, org.languagetool.Language, List<org.languagetool.Language>, boolean) throws java.io.IOException,public List<java.lang.String> getRuleFileNames() ,public int getRulePriority(org.languagetool.rules.Rule) ,public synchronized org.languagetool.tokenizers.SentenceTokenizer getSentenceTokenizer() ,public abstract java.lang.String getShortCode() ,public final java.lang.String getShortCodeWithCountryAndVariant() ,public synchronized org.languagetool.synthesis.Synthesizer getSynthesizer() ,public synchronized org.languagetool.tagging.Tagger getTagger() ,public final java.lang.String getTranslatedName(java.util.ResourceBundle) ,public org.languagetool.rules.patterns.Unifier getUnifier() ,public org.languagetool.rules.patterns.UnifierConfiguration getUnifierConfiguration() ,public java.lang.String getVariant() ,public synchronized org.languagetool.tokenizers.Tokenizer getWordTokenizer() ,public boolean hasMinMatchesRules() ,public boolean hasNGramFalseFriendRule(org.languagetool.Language) ,public final boolean hasVariant() ,public int hashCode() ,public boolean isAdvancedTypographyEnabled() ,public boolean isExternal() ,public boolean isHiddenFromGui() ,public boolean isSpellcheckOnlyLanguage() ,public boolean isVariant() ,public List<org.languagetool.rules.RuleMatch> mergeSuggestions(List<org.languagetool.rules.RuleMatch>, org.languagetool.markup.AnnotatedText, Set<java.lang.String>) ,public List<java.lang.String> prepareLineForSpeller(java.lang.String) ,public void setChunker(org.languagetool.chunking.Chunker) ,public void setDisambiguator(org.languagetool.tagging.disambiguation.Disambiguator) ,public void setPostDisambiguationChunker(org.languagetool.chunking.Chunker) ,public void 
setSentenceTokenizer(org.languagetool.tokenizers.SentenceTokenizer) ,public void setSynthesizer(org.languagetool.synthesis.Synthesizer) ,public void setTagger(org.languagetool.tagging.Tagger) ,public void setWordTokenizer(org.languagetool.tokenizers.Tokenizer) ,public java.lang.String toAdvancedTypography(java.lang.String) ,public final java.lang.String toString() <variables>private static final java.util.regex.Pattern APOSTROPHE,private static final org.languagetool.tagging.disambiguation.Disambiguator DEMO_DISAMBIGUATOR,private static final org.languagetool.tagging.Tagger DEMO_TAGGER,private static final java.util.regex.Pattern DOUBLE_QUOTE_PATTERN,private static final java.util.regex.Pattern ELLIPSIS,private static final java.util.regex.Pattern INSIDE_SUGGESTION,private static final java.util.regex.Pattern NBSPACE1,private static final java.util.regex.Pattern NBSPACE2,private static final java.util.regex.Pattern QUOTED_CHAR_PATTERN,private static final org.languagetool.tokenizers.SentenceTokenizer SENTENCE_TOKENIZER,private static final java.util.regex.Pattern SINGLE_QUOTE_PATTERN,private static final java.util.regex.Pattern SUGGESTION_CLOSE_TAG,private static final java.util.regex.Pattern SUGGESTION_OPEN_TAG,private static final java.util.regex.Pattern TYPOGRAPHY_PATTERN_1,private static final java.util.regex.Pattern TYPOGRAPHY_PATTERN_2,private static final java.util.regex.Pattern TYPOGRAPHY_PATTERN_3,private static final java.util.regex.Pattern TYPOGRAPHY_PATTERN_4,private static final java.util.regex.Pattern TYPOGRAPHY_PATTERN_5,private static final org.languagetool.tokenizers.WordTokenizer WORD_TOKENIZER,private org.languagetool.chunking.Chunker chunker,private final org.languagetool.rules.patterns.UnifierConfiguration disambiguationUnifierConfig,private org.languagetool.tagging.disambiguation.Disambiguator disambiguator,private final java.util.regex.Pattern ignoredCharactersRegex,private static final 
Map<Class<org.languagetool.Language>,org.languagetool.JLanguageTool> languagetoolInstances,private static final Logger logger,private final java.util.concurrent.atomic.AtomicBoolean noLmWarningPrinted,private List<org.languagetool.rules.patterns.AbstractPatternRule> patternRules,private org.languagetool.chunking.Chunker postDisambiguationChunker,private org.languagetool.tokenizers.SentenceTokenizer sentenceTokenizer,private java.lang.String shortCodeWithCountryAndVariant,private static final Map<Class<? extends org.languagetool.Language>,org.languagetool.rules.spelling.SpellingCheckRule> spellingRules,private org.languagetool.synthesis.Synthesizer synthesizer,private org.languagetool.tagging.Tagger tagger,private final org.languagetool.rules.patterns.UnifierConfiguration unifierConfig,private org.languagetool.tokenizers.Tokenizer wordTokenizer
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/ar/src/main/java/org/languagetool/rules/ar/ArabicHunspellSpellerRule.java
|
ArabicHunspellSpellerRule
|
tokenizeText
|
class ArabicHunspellSpellerRule extends HunspellRule {
public static final String RULE_ID = "HUNSPELL_RULE_AR";
private static final String RESOURCE_FILENAME = "/ar/hunspell/ar.dic";
public ArabicHunspellSpellerRule(ResourceBundle messages, UserConfig userConfig) {
super(messages, new Arabic(), userConfig);
}
public ArabicHunspellSpellerRule(ResourceBundle messages) {
this(messages, null);
}
@Override
public String getId() {
return RULE_ID;
}
@Override
@NotNull
protected String getDictFilenameInResources(String langCountry) {
return RESOURCE_FILENAME;
}
@Override
protected String[] tokenizeText(String sentence) {<FILL_FUNCTION_BODY>}
@Override
protected boolean ignoreWord(String word) throws IOException {
String striped = ArabicStringTools.removeTashkeel(word);
return super.ignoreWord(striped);
}
@Override
public boolean isMisspelled(String word) {
String striped = ArabicStringTools.removeTashkeel(word);
return super.isMisspelled(striped);
}
@Override
protected boolean isLatinScript() {
return false;
}
}
|
Pattern pattern = Pattern.compile("[^\\p{L}" + ArabicStringTools.TASHKEEL_CHARS + "]");
return pattern.split(sentence);
| 363
| 49
| 412
|
<methods>public void <init>(java.util.ResourceBundle, org.languagetool.Language, org.languagetool.UserConfig) ,public void <init>(java.util.ResourceBundle, org.languagetool.Language, org.languagetool.UserConfig, List<org.languagetool.Language>) ,public void <init>(java.util.ResourceBundle, org.languagetool.Language, org.languagetool.UserConfig, List<org.languagetool.Language>, org.languagetool.languagemodel.LanguageModel) ,public static Queue<java.lang.String> getActiveChecks() ,public java.lang.String getDescription() ,public java.lang.String getId() ,public List<java.lang.String> getSuggestions(java.lang.String) throws java.io.IOException,public boolean isMisspelled(java.lang.String) ,public org.languagetool.rules.RuleMatch[] match(org.languagetool.AnalyzedSentence) throws java.io.IOException<variables>protected static final java.lang.String FILE_EXTENSION,private final java.util.regex.Pattern MINUS_PLUS,private static final java.lang.String NON_ALPHABETIC,public static final java.lang.String RULE_ID,private static final java.util.regex.Pattern STARTS_WITH_TWO_UPPERCASE_CHARS,private static final java.lang.String[] WHITESPACE_ARRAY,private static final ConcurrentLinkedQueue<java.lang.String> activeChecks,private final List<java.lang.String> commonGermanWords,private final List<java.lang.String> commonPortugueseWords,private final non-sealed List<org.languagetool.rules.spelling.RuleWithLanguage> enSpellRules,protected volatile org.languagetool.rules.spelling.hunspell.HunspellDictionary hunspell,private static final Logger logger,private static final boolean monitorRules,private volatile boolean needsInit,protected java.util.regex.Pattern nonWordPattern,private final non-sealed org.languagetool.UserConfig userConfig
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/ar/src/main/java/org/languagetool/rules/ar/ArabicInflectedOneWordReplaceRule.java
|
ArabicInflectedOneWordReplaceRule
|
match
|
class ArabicInflectedOneWordReplaceRule extends AbstractSimpleReplaceRule2 {
public static final String AR_INFLECTED_ONE_WORD_REPLACE = "AR_INFLECTED_ONE_WORD";
private static final String FILE_NAME = "/ar/inflected_one_word.txt";
private static final Locale AR_LOCALE = new Locale("ar");
private final ArabicTagger tagger;
private final ArabicSynthesizer synthesizer;
private final List<Map<String, SuggestionWithMessage>> wrongWords;
public ArabicInflectedOneWordReplaceRule(ResourceBundle messages) {
super(messages, new Arabic());
tagger = new ArabicTagger();
tagger.enableNewStylePronounTag();
synthesizer = new ArabicSynthesizer(new Arabic());
super.setCategory(Categories.MISC.getCategory(messages));
setLocQualityIssueType(ITSIssueType.Inconsistency);
addExamplePair(Example.wrong("أجريت <marker>أبحاثا</marker> في المخبر"),
Example.fixed("أجريت <marker>بحوثا</marker> في المخبر."));
// get wrong words from resource file
wrongWords = getWrongWords();
}
@Override
public String getId() {
return AR_INFLECTED_ONE_WORD_REPLACE;
}
@Override
public final List<String> getFileNames() {
return Collections.singletonList(FILE_NAME);
}
@Override
public String getDescription() {
return "قاعدة تطابق الكلمات التي يجب تجنبها وتقترح تصويبا لها";
}
@Override
public String getShort() {
return "خطأ، يفضل أن يقال:";
}
@Override
public String getMessage() {
return " لا تقل '$match' بل قل: $suggestions";
}
@Override
public String getSuggestionsSeparator() {
return " أو ";
}
@Override
public Locale getLocale() {
return AR_LOCALE;
}
@Override
public RuleMatch[] match(AnalyzedSentence sentence) {<FILL_FUNCTION_BODY>}
/* return True if the word is a candidate to be replaced in text rule file */
private boolean isCandidateWord(AnalyzedToken mytoken) {
if (getSuggestedWords(mytoken) != null) {
return true;
} else {
return false;
}
}
/* if the word is in text rules file, return the suggested word*/
private SuggestionWithMessage getSuggestedWords(AnalyzedToken mytoken) {
// keep the suitable postags
AnalyzedToken wordTok = mytoken;
String wordLemma = wordTok.getLemma();
String wordPostag = wordTok.getPOSTag();
// if postag is attached
// test if word is in the word list
if (wordPostag != null) {
// lookup in WrongWords
SuggestionWithMessage wordLemmaMatch = wrongWords.get(wrongWords.size() - 1).get(wordLemma);
// The lemma is found in the dictionary file
if (wordLemmaMatch != null) {
return wordLemmaMatch;
}
}
return null;
}
/* generate a new form according to a specific postag,*/
private List<String> inflectSuggestedWords(String targetLemma, AnalyzedToken sourcetoken) {
return synthesizer.inflectLemmaLike(targetLemma, sourcetoken);
}
}
|
List<RuleMatch> ruleMatches = new ArrayList<>();
if (wrongWords.size() == 0) {
return toRuleMatchArray(ruleMatches);
}
AnalyzedTokenReadings[] tokens = sentence.getTokensWithoutWhitespace();
for (AnalyzedTokenReadings token : tokens) { // ignoring token 0, i.e., SENT_START
// browse each word with
for (AnalyzedToken wordTok : token.getReadings()) {
// test if the first token is a to replace word
boolean isCandidateWord = isCandidateWord(wordTok);
if (isCandidateWord) {
// get suggestions
List<String> propositions = new ArrayList<>();
String sugMsg = "";
SuggestionWithMessage propositionsWithMessage = getSuggestedWords(wordTok);
if (propositionsWithMessage != null) {
propositions = Arrays.asList(propositionsWithMessage.getSuggestion().split("\\|"));
sugMsg = propositionsWithMessage.getMessage();
sugMsg = sugMsg != null ? sugMsg : "";
}
// generate suggestion according to suggested word
StringBuilder replacement = new StringBuilder("");
for (String proposition : propositions) {
List<String> inflectedWordList = inflectSuggestedWords(proposition, wordTok);
for (String w : inflectedWordList) {
replacement.append("<suggestion>" + w + "</suggestion> ");
}
}
String msg = "' الكلمة خاطئة " + token.getToken() + " ' ،" + sugMsg + ". استعمل " + replacement;
RuleMatch match = new RuleMatch(
this, sentence, token.getStartPos(), token.getEndPos(),
token.getStartPos(), token.getEndPos(), msg, "خطأ في استعمال كلمة:" + sugMsg);
ruleMatches.add(match);
}
} // end wordTok
}
return toRuleMatchArray(ruleMatches);
| 1,067
| 544
| 1,611
|
<methods>public void <init>(java.util.ResourceBundle, org.languagetool.Language) ,public boolean checkKeyWordsAreKnownToSpeller() ,public boolean checkKeyWordsAreUnknownToSpeller() ,public org.languagetool.rules.AbstractSimpleReplaceRule2.CaseSensitivy getCaseSensitivy() ,public abstract java.lang.String getDescription() ,public java.lang.String getDescription(java.lang.String) ,public abstract List<java.lang.String> getFileNames() ,public List<java.net.URL> getFilePaths() ,public abstract java.lang.String getId() ,public abstract java.util.Locale getLocale() ,public abstract java.lang.String getMessage() ,public abstract java.lang.String getShort() ,public java.lang.String getSuggestionsSeparator() ,public List<Map<java.lang.String,org.languagetool.rules.SuggestionWithMessage>> getWrongWords() ,public org.languagetool.rules.RuleMatch[] match(org.languagetool.AnalyzedSentence) ,public boolean separateKeyWordsBySpeller() ,public void useSubRuleSpecificIds() <variables>private int MAX_LENGTH_SHORT_WORDS,private static final int MAX_TOKENS_IN_MULTIWORD,private boolean ignoreShortUppercaseWords,private volatile boolean initialized,protected final non-sealed org.languagetool.Language language,private Map<java.lang.String,org.languagetool.rules.SuggestionWithMessage> mFullNoSpace,private Map<java.lang.String,org.languagetool.rules.SuggestionWithMessage> mFullSpace,private Map<java.lang.String,java.lang.Integer> mStartNoSpace,private Map<java.lang.String,java.lang.Integer> mStartSpace,private boolean ruleHasSuggestions,protected boolean subRuleSpecificIds
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/ar/src/main/java/org/languagetool/rules/ar/ArabicWordRepeatRule.java
|
ArabicWordRepeatRule
|
ignore
|
class ArabicWordRepeatRule extends WordRepeatRule {
public ArabicWordRepeatRule(ResourceBundle messages) {
super(messages, new Arabic());
addExamplePair(Example.wrong("هذا <marker>فقط فقط</marker> مثال."),
Example.fixed("هذا <marker>فقط</marker> مثال."));
}
@Override
public String getId() {
return "ARABIC_WORD_REPEAT_RULE";
}
@Override
public boolean ignore(AnalyzedTokenReadings[] tokens, int position) {<FILL_FUNCTION_BODY>}
}
|
if (wordRepetitionOf("خطوة", tokens, position)) {
return true; // "نفذت التعليمات خطوة خطوة."
}
if (wordRepetitionOf("رويدا", tokens, position)) {
return true;
}
return super.ignore(tokens, position);
| 192
| 111
| 303
|
<methods>public void <init>(java.util.ResourceBundle, org.languagetool.Language) ,public int estimateContextForSureMatch() ,public java.lang.String getDescription() ,public java.lang.String getId() ,public boolean ignore(org.languagetool.AnalyzedTokenReadings[], int) ,public org.languagetool.rules.RuleMatch[] match(org.languagetool.AnalyzedSentence) <variables>
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/ar/src/main/java/org/languagetool/rules/ar/filters/ArabicAdjectiveToExclamationFilter.java
|
ArabicAdjectiveToExclamationFilter
|
prepareSuggestions
|
class ArabicAdjectiveToExclamationFilter extends RuleFilter {
public ArabicAdjectiveToExclamationFilter() {
this.adj2compList = loadFromPath(FILE_NAME);
}
private final ArabicTagger tagger = new ArabicTagger();
private static final String FILE_NAME = "/ar/arabic_adjective_exclamation.txt";
private final Map<String, List<String>> adj2compList;
private final Map<String, String> adj2comp = new HashMap<String, String>() {{
// tri letters verb:
put("رشيد", "أرشد");
put("طويل", "أطول");
put("بديع", "أبدع");
}};
@Nullable
@Override
public RuleMatch acceptRuleMatch(RuleMatch match, Map<String, String> arguments, int patternTokenPos, AnalyzedTokenReadings[] patternTokens, List<Integer> tokenPositions) {
// This rule return only the comparative according to given adjective
String adj = arguments.get("adj"); // extract adjective
String noun = arguments.get("noun"); // the second argument
int adjTokenIndex;
try {
adjTokenIndex = Integer.parseInt(arguments.get("adj_pos")) - 1;
} catch (NumberFormatException e) {
throw new RuntimeException("Error parsing adj_pos from : " + arguments.get("adj_pos"), e);
}
// filter tokens which have a lemma of adjective
// some cases can have multiple lemmas, but only adjective lemma are used
List<String> adjLemmas = tagger.getLemmas(patternTokens[adjTokenIndex], "adj");
// get comparative from Adj/comp list
List<String> compList = new ArrayList<>();
for (String adjLemma : adjLemmas) {
// get comparative suitable to adjective
List<String> comparativeList = adj2compList.get(adjLemma);
if (comparativeList != null) {
compList.addAll(comparativeList);
}
}
// remove duplicates
compList = new ArrayList<>(new HashSet<>(compList));
RuleMatch newMatch = new RuleMatch(match.getRule(), match.getSentence(), match.getFromPos(), match.getToPos(), match.getMessage(), match.getShortMessage());
// generate suggestion
List<String> suggestionList = prepareSuggestions(compList, noun);
for (String sug : suggestionList) {
newMatch.addSuggestedReplacement(sug);
}
return newMatch;
}
/* prepare suggestion for a list of comparative */
protected static List<String> prepareSuggestions(List<String> compList, String noun) {
List<String> sugList = new ArrayList<>();
for (String comp : compList) {
sugList.addAll(prepareSuggestions(comp, noun));
}
return sugList;
}
protected static List<String> prepareSuggestions(String comp, String noun) {<FILL_FUNCTION_BODY>}
/* test if the word is an isolated pronoun */
private static boolean isPronoun(String word) {
if (word == null) {
return false;
}
return word.equals("هو")
|| word.equals("هي")
|| word.equals("هم")
|| word.equals("هما")
|| word.equals("أنا");
}
/* get corresponding attached to unattached pronoun */
private static String getAttachedPronoun(String word) {
if (word == null) {
return "";
}
Map<String, String> isolatedToAttachedPronoun = new HashMap<>();
isolatedToAttachedPronoun.put("هو", "ه");
isolatedToAttachedPronoun.put("هي", "ها");
isolatedToAttachedPronoun.put("هم", "هم");
isolatedToAttachedPronoun.put("هن", "هن");
isolatedToAttachedPronoun.put("نحن", "نا");
return isolatedToAttachedPronoun.getOrDefault(word, "");
}
protected static Map<String, List<String>> loadFromPath(String path) {
return new SimpleReplaceDataLoader().loadWords(path);
}
public static String getDataFilePath() {
return FILE_NAME;
}
}
|
/*
الحالات:
الاسم ليس ضميرا
ال كم الولد جميل==> ما أجمل الولد
أجمل بالولد
حالة الضمير
كم هو جميل==> ما أجمله
أجمل به
حالة الضفة غير الثلاثية
اسم:
كم الطالب شديد الاستيعاب
ما أشد استيعاب الطالب
أشدد باستيعابه
ضمير
كم هو شديد الاستيعاب
ما أشد استيعابه
أشد باستيعابه
*/
List<String> sugList = new ArrayList<>();
StringBuilder suggestion = new StringBuilder();
suggestion.append(comp);
if (noun == null || noun.isEmpty()) {
} else if (isPronoun(noun)) {
// no space adding
suggestion.append(ArabicWordMaps.getAttachedPronoun(noun));
} else {
//if comparative is of second form don't add a space
if (!comp.endsWith(" ب")) {
suggestion.append(" ");
}
suggestion.append(noun);
}
// add suggestions
sugList.add(suggestion.toString());
return sugList;
| 1,146
| 525
| 1,671
|
<methods>public non-sealed void <init>() ,public abstract org.languagetool.rules.RuleMatch acceptRuleMatch(org.languagetool.rules.RuleMatch, Map<java.lang.String,java.lang.String>, int, org.languagetool.AnalyzedTokenReadings[], List<java.lang.Integer>) throws java.io.IOException,public boolean matches(Map<java.lang.String,java.lang.String>, org.languagetool.AnalyzedTokenReadings[], int, List<java.lang.Integer>) throws java.io.IOException<variables>
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/ar/src/main/java/org/languagetool/rules/ar/filters/ArabicDMYDateCheckFilter.java
|
ArabicDMYDateCheckFilter
|
acceptRuleMatch
|
class ArabicDMYDateCheckFilter extends ArabicDateCheckFilter {
@Override
public RuleMatch acceptRuleMatch(RuleMatch match, Map<String, String> args, int patternTokenPos, AnalyzedTokenReadings[] patternTokens, List<Integer> tokenPositions) {<FILL_FUNCTION_BODY>}
}
|
if (args.containsKey("year") || args.containsKey("month") || args.containsKey("day")) {
throw new RuntimeException("Set only 'weekDay' and 'date' for " + ArabicDMYDateCheckFilter.class.getSimpleName());
}
String dateString = getRequired("date", args);
String[] parts = dateString.split("-");
if (parts.length != 3) {
throw new RuntimeException("Expected date in format 'dd-mm-yyyy': '" + dateString + "'");
}
args.put("day", parts[0]);
args.put("month", parts[1]);
args.put("year", parts[2]);
return super.acceptRuleMatch(match, args, patternTokenPos, patternTokens, tokenPositions);
| 81
| 196
| 277
|
<methods>public non-sealed void <init>() <variables>private final org.languagetool.rules.ar.filters.ArabicDateFilterHelper dateFilterHelper
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/ar/src/main/java/org/languagetool/rules/ar/filters/ArabicDateFilterHelper.java
|
ArabicDateFilterHelper
|
getMonth
|
class ArabicDateFilterHelper {
protected Calendar getCalendar() {
return Calendar.getInstance(Locale.UK);
}
@SuppressWarnings("ControlFlowStatementWithoutBraces")
protected int getDayOfWeek(String dayStr) {
switch (dayStr) {
case "السبت":
return Calendar.SATURDAY;
case "الأحد":
return Calendar.SUNDAY;
case "الإثنين":
return Calendar.MONDAY;
case "الاثنين":
return Calendar.MONDAY;
case "الثلاثاء":
return Calendar.TUESDAY;
case "الأربعاء":
return Calendar.WEDNESDAY;
case "الخميس":
return Calendar.THURSDAY;
case "الجمعة":
return Calendar.FRIDAY;
}
throw new RuntimeException("No day name found for " + dayStr + "'");
}
@SuppressWarnings({"ControlFlowStatementWithoutBraces", "MagicNumber"})
protected int getMonth(String monthStr) {<FILL_FUNCTION_BODY>}
/* get day of week name */
protected String getDayOfWeekName(int day) {
switch (day) {
case Calendar.SATURDAY:
return "السبت";
case Calendar.SUNDAY:
return "الأحد";
case Calendar.MONDAY:
return "الإثنين";
case Calendar.TUESDAY:
return "الثلاثاء";
case Calendar.WEDNESDAY:
return "الأربعاء";
case Calendar.THURSDAY:
return "الخميس";
case Calendar.FRIDAY:
return "الجمعة";
default:
return "غير محدد";
}
}
}
|
String mon = StringTools.trimSpecialCharacters(monthStr);
switch (mon) {
// الأشهر العربية بالسريانية
case "كانون الثاني":
return 1;
case "كانون ثاني":
return 1;
case "شباط":
return 2;
case "آذار":
return 3;
case "نيسان":
return 4;
case "أيار":
return 5;
case "حزيران":
return 6;
case "تموز":
return 7;
case "آب":
return 8;
case "أيلول":
return 9;
case "تشرين الأول":
return 10;
case "تشرين الثاني":
return 11;
case "كانون الأول":
return 12;
case "تشرين ثاني":
return 11;
case "كانون أول":
return 12;
// الأشهر المعربة عن الإنجليزية
case "يناير":
return 1;
case "فبراير":
return 2;
case "مارس":
return 3;
case "أبريل":
return 4;
case "مايو":
return 5;
case "يونيو":
return 6;
case "يوليو":
return 7;
case "أغسطس":
return 8;
case "سبتمبر":
return 9;
case "أكتوبر":
return 10;
case "نوفمبر":
return 11;
case "ديسمبر":
return 12;
// الأشهر المعربة عن الفرنسية
case "جانفي":
return 1;
case "جانفييه":
return 1;
case "فيفري":
return 2;
case "أفريل":
return 4;
case "ماي":
return 5;
case "جوان":
return 6;
case "جويلية":
return 7;
case "أوت":
return 8;
}
throw new RuntimeException("No month name for '" + monthStr + "'");
| 570
| 845
| 1,415
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/ar/src/main/java/org/languagetool/rules/ar/filters/ArabicMasdarToVerbFilter.java
|
ArabicMasdarToVerbFilter
|
filterLemmas
|
class ArabicMasdarToVerbFilter extends RuleFilter {
public ArabicMasdarToVerbFilter() {
this.masdar2verbList = loadFromPath(FILE_NAME);
}
private final ArabicTagger tagger = new ArabicTagger();
private static final String FILE_NAME = "/ar/arabic_masdar_verb.txt";
private Map<String, List<String>> masdar2verbList;
private final ArabicSynthesizer synthesizer = new ArabicSynthesizer(new Arabic());
final List<String> authorizeLemma = new ArrayList() {{
add("قَامَ");
}};
private final Map<String, String> masdar2verb = new HashMap<String, String>() {{
// tri letters verb:
put("عمل", "عَمِلَ");
put("إعمال", "أَعْمَلَ");
put("تعميل", "عَمَّلَ");
put("ضرب", "ضَرَبَ");
put("أكل", "أَكَلَ");
// regular ones:
// non tri letters verb
put("إجابة", "أَجَابَ");
}};
@Nullable
@Override
public RuleMatch acceptRuleMatch(RuleMatch match, Map<String, String> arguments, int patternTokenPos, AnalyzedTokenReadings[] patternTokens, List<Integer> tokenPositions) {
// The pattern is composed of the words
// قام بالأكل
// يقوم بالأكل
// يقومون بالأكل
// first token: auxialliary verb Qam
// second token: Noun as Masdar
// replace the Masdar by its verb
// inflect the verb according the auxilaiary verb inflection
String auxVerb = arguments.get("verb"); // الفعل قام أو ما شابهه
String masdar = arguments.get("noun"); // masdar
// filter tokens which have a lemma
// some cases can have multiple lemmas, but only auxilliry lemma are used
List<String> auxVerbLemmasAll = tagger.getLemmas(patternTokens[0], "verb");
List<String> auxVerbLemmas = filterLemmas(auxVerbLemmasAll);
// get all lemmas of the given masdar
List<String> masdarLemmas = tagger.getLemmas(patternTokens[1], "masdar");
// generate multiple verb from masdar lemmas list
List<String> verbList = new ArrayList<>();
// if the auxiliary verb has many lemmas, filter authorized lemma only
// the first token: auxiliary verb
for (AnalyzedToken auxVerbToken : patternTokens[0]) {
// if the token has an authorized lemma
if (auxVerbLemmas.contains(auxVerbToken.getLemma())) {
// for all masdar lemmas
for (String lemma : masdarLemmas) {
List<String> verbLemmaList = masdar2verbList.get(lemma);
if (verbLemmaList != null) {
// if verb, inflect verd according to auxialiary verb inlfection
for (String vrbLem : verbLemmaList) {
List<String> inflectedverbList = synthesizer.inflectLemmaLike(vrbLem, auxVerbToken);
verbList.addAll(inflectedverbList);
}
}
}
}
}
// remove duplicates
verbList = new ArrayList<>(new HashSet<>(verbList));
RuleMatch newMatch = new RuleMatch(match.getRule(), match.getSentence(), match.getFromPos(), match.getToPos(), match.getMessage(), match.getShortMessage());
// generate suggestion
for (String verb : verbList) {
newMatch.addSuggestedReplacement(verb);
}
return newMatch;
}
List<String> filterLemmas(List<String> lemmas) {<FILL_FUNCTION_BODY>}
protected static Map<String, List<String>> loadFromPath(String path) {
return new SimpleReplaceDataLoader().loadWords(path);
}
}
|
List<String> filtred = new ArrayList<>();
for (String lem : authorizeLemma) {
if (lemmas.contains(lem)) {
filtred.add(lem);
}
}
return filtred;
| 1,149
| 66
| 1,215
|
<methods>public non-sealed void <init>() ,public abstract org.languagetool.rules.RuleMatch acceptRuleMatch(org.languagetool.rules.RuleMatch, Map<java.lang.String,java.lang.String>, int, org.languagetool.AnalyzedTokenReadings[], List<java.lang.Integer>) throws java.io.IOException,public boolean matches(Map<java.lang.String,java.lang.String>, org.languagetool.AnalyzedTokenReadings[], int, List<java.lang.Integer>) throws java.io.IOException<variables>
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/ar/src/main/java/org/languagetool/rules/ar/filters/ArabicVerbToMafoulMutlaqFilter.java
|
ArabicVerbToMafoulMutlaqFilter
|
acceptRuleMatch
|
class ArabicVerbToMafoulMutlaqFilter extends RuleFilter {
public ArabicVerbToMafoulMutlaqFilter() {
this.verb2masdarList = loadFromPath(FILE_NAME);
}
private final ArabicTagger tagger = new ArabicTagger();
private static final String FILE_NAME = "/ar/arabic_verb_masdar.txt";
private Map<String, List<String>> verb2masdarList;
private final ArabicSynthesizer synthesizer = new ArabicSynthesizer(new Arabic());
private final Map<String, String> verb2masdar = new HashMap<String, String>() {{
// tri letters verb:
put("عَمِلَ", "عمل");
put("أَعْمَلَ", "إعمال");
put("عَمَّلَ", "تعميل");
put("أَكَلَ", "أكل");
put("سَأَلَ", "سؤال");
// regular ones:
// non tri letters verb
put("أَجَابَ", "إجابة");
}};
@Nullable
@Override
public RuleMatch acceptRuleMatch(RuleMatch match, Map<String, String> arguments, int patternTokenPos, AnalyzedTokenReadings[] patternTokens, List<Integer> tokenPositions) {<FILL_FUNCTION_BODY>}
protected static Map<String, List<String>> loadFromPath(String path) {
return new SimpleReplaceDataLoader().loadWords(path);
}
}
|
String verb = arguments.get("verb");
List<String> verbLemmas = tagger.getLemmas(patternTokens[0], "verb");
String adj = arguments.get("adj");
// generate multiple masdar from verb lemmas list */
List<String> inflectedMasdarList = new ArrayList<>();
List<String> inflectedAdjList = new ArrayList<>();
String inflectedAdjMasculine = synthesizer.inflectAdjectiveTanwinNasb(adj, false);
String inflectedAdjfeminin = synthesizer.inflectAdjectiveTanwinNasb(adj, true);
for (String lemma : verbLemmas) {
// get sugegsted masdars lemmas
List<String> msdrLemmaList = verb2masdarList.get(lemma);
if (msdrLemmaList != null) {
for (String msdr : msdrLemmaList) {
if (msdr != null) {
String inflectedMasdar = synthesizer.inflectMafoulMutlq(msdr);
inflectedMasdarList.add(inflectedMasdar);
String inflectedAdj = (msdr.endsWith(Character.toString(TEH_MARBUTA))) ? inflectedAdjfeminin : inflectedAdjMasculine;
inflectedAdjList.add(inflectedAdj);
}
}
}
}
RuleMatch newMatch = new RuleMatch(match.getRule(), match.getSentence(), match.getFromPos(), match.getToPos(), match.getMessage(), match.getShortMessage());
int i = 0;
List<String> suggestionPhrases = new ArrayList<>();
for (String msdr : inflectedMasdarList) {
String sugPhrase = verb + " " + msdr + " " + inflectedAdjList.get(i);
// Avoid redundancy
if (!suggestionPhrases.contains(sugPhrase)) {
newMatch.addSuggestedReplacement(sugPhrase);
suggestionPhrases.add(sugPhrase);
}
i++;
}
return newMatch;
| 450
| 559
| 1,009
|
<methods>public non-sealed void <init>() ,public abstract org.languagetool.rules.RuleMatch acceptRuleMatch(org.languagetool.rules.RuleMatch, Map<java.lang.String,java.lang.String>, int, org.languagetool.AnalyzedTokenReadings[], List<java.lang.Integer>) throws java.io.IOException,public boolean matches(Map<java.lang.String,java.lang.String>, org.languagetool.AnalyzedTokenReadings[], int, List<java.lang.Integer>) throws java.io.IOException<variables>
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/ar/src/main/java/org/languagetool/tools/ArabicUnitsHelper.java
|
ArabicUnitsHelper
|
getForm
|
class ArabicUnitsHelper {
// map of unit cases
static final Map<String, List<String>> unitsMap2 = new HashMap<>();
static final Map<String, Map<String, String>> unitsMap = new HashMap<>();
// cases will be indexed by numeric values
final short ONE_POS_RAF3 = 0;
final short ONE_POS_NASB = 1;
final short ONE_POS_JAR = 2;
final short TWO_POS_RAF3 = 3;
final short TWO_POS_NASB = 4;
final short TWO_POS_JAR = 5;
final short PLURAL_POS_RAF3 = 6;
final short PLURAL_POS_NASB = 7;
final short PLURAL_POS_JAR = 8;
static {
Map<String, String> unitsEntry = new HashMap<>();
unitsEntry.put("feminin", "no");
unitsEntry.put("one_raf3", "دينار");
unitsEntry.put("one_nasb", "دينارًا");
unitsEntry.put("one_jar", "دينارٍ");
unitsEntry.put("two_raf3", "ديناران");
unitsEntry.put("two_nasb", "دينارين");
unitsEntry.put("two_jar", "دينارين");
unitsEntry.put("plural_raf3", "دنانيرُ");
unitsEntry.put("plural_nasb", "دنانيرَ");
unitsEntry.put("plural_jar", "دنانيرَ");
unitsMap.put("دينار", unitsEntry);
unitsEntry = new HashMap<>();
unitsEntry.put("feminin", "no");
unitsEntry.put("one_raf3", "درهم");
unitsEntry.put("one_nasb", "درهمًا");
unitsEntry.put("one_jar", "درهمٍ");
unitsEntry.put("two_raf3", "درهمان");
unitsEntry.put("two_nasb", "درهمين");
unitsEntry.put("two_jar", "درهمين");
unitsEntry.put("plural_raf3", "دراهمُ");
unitsEntry.put("plural_nasb", "دراهمَ");
unitsEntry.put("plural_jar", "دراهمَ");
unitsMap.put("درهم", unitsEntry);
unitsEntry = new HashMap<>();
unitsEntry.put("feminin", "no");
unitsEntry.put("one_raf3", "دولار");
unitsEntry.put("one_nasb", "دولارًا");
unitsEntry.put("one_jar", "دولارٍ");
unitsEntry.put("two_raf3", "دولاران");
unitsEntry.put("two_nasb", "دولارين");
unitsEntry.put("two_jar", "دولارين");
unitsEntry.put("plural_raf3", "دولاراتٌ");
unitsEntry.put("plural_nasb", "دولاراتٍ");
unitsEntry.put("plural_jar", "دولاراتٍ");
unitsMap.put("دولار", unitsEntry);
unitsEntry = new HashMap<>();
unitsEntry.put("feminin", "yes");
unitsEntry.put("one_raf3", "ليرة");
unitsEntry.put("one_nasb", "ليرةً");
unitsEntry.put("one_jar", "ليرةٍ");
unitsEntry.put("two_raf3", "ليرتان");
unitsEntry.put("two_nasb", "ليرتين");
unitsEntry.put("two_jar", "ليرتين");
unitsEntry.put("plural_raf3", "ليراتٌ");
unitsEntry.put("plural_nasb", "ليراتٍ");
unitsEntry.put("plural_jar", "ليراتٍ");
unitsMap.put("ليرة", unitsEntry);
}
/* test if the unit is feminin */
public static boolean isFeminin(String unit) {
return (unitsMap.containsKey(unit) && unitsMap.get(unit).getOrDefault("feminin", "no").equals("yes"));
}
public static boolean isUnit(String unit) {
return unitsMap.containsKey(unit);
}
/* return the suitable form of units according to inflection */
public static String getForm(String unit, String category, String inflection) {<FILL_FUNCTION_BODY>}
/* return the suitable form of units according to inflection */
public static String getOneForm(String unit, String inflection) {
return getForm(unit, "one", inflection);
}
/* return the suitable form of units according to inflection */
public static String getTwoForm(String unit, String inflection) {
return getForm(unit, "two", inflection);
} /* return the suitable form of units according to inflection */
public static String getPluralForm(String unit, String inflection) {
return getForm(unit, "plural", inflection);
}
}
|
if (inflection.isEmpty()) {
inflection = "raf3";
}
String key = category + "_" + inflection;
if (unitsMap.containsKey(unit)) {
return unitsMap.get(unit).getOrDefault(key, "[" + unit + "]");
}
return "[[" + unit + "]]";
| 1,450
| 90
| 1,540
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/ar/src/main/java/org/languagetool/tools/ArabicWordMaps.java
|
ArabicWordMaps
|
getAttachedPronoun
|
class ArabicWordMaps {
private static final ArabicConstantsMaps constantMap = new ArabicConstantsMaps();
private ArabicWordMaps() {
// restrict instantiation
}
// generate the attached forms from isolated
public static String getAttachedPronoun(String word) {<FILL_FUNCTION_BODY>}
}
|
if (word == null)
return "";
return ArabicConstantsMaps.isolatedToAttachedPronoun.getOrDefault(word, "");
| 88
| 42
| 130
|
<no_super_class>
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/ast/src/main/java/org/languagetool/language/Asturian.java
|
Asturian
|
getRelevantRules
|
class Asturian extends Language {
@Override
public String getName() {
return "Asturian";
}
@Override
public String getShortCode() {
return "ast";
}
@Override
public String[] getCountries() {
return new String[]{"ES"};
}
@Override
public Contributor[] getMaintainers() {
return new Contributor[] { new Contributor("Xesús González Rato") };
}
@Override
public List<Rule> getRelevantRules(ResourceBundle messages, UserConfig userConfig, Language motherTongue, List<Language> altLanguages) throws IOException {<FILL_FUNCTION_BODY>}
@Override
public SentenceTokenizer createDefaultSentenceTokenizer() {
return new SRXSentenceTokenizer(this);
}
@NotNull
@Override
public Tagger createDefaultTagger() {
return new AsturianTagger();
}
@Nullable
@Override
protected SpellingCheckRule createDefaultSpellingRule(ResourceBundle messages) throws IOException {
return new MorfologikAsturianSpellerRule(messages, this, null, null);
}
}
|
return Arrays.asList(
new CommaWhitespaceRule(messages),
new DoublePunctuationRule(messages),
new GenericUnpairedBracketsRule(messages),
new MorfologikAsturianSpellerRule(messages, this, userConfig, altLanguages),
new UppercaseSentenceStartRule(messages, this),
new MultipleWhitespaceRule(messages, this)
);
| 313
| 105
| 418
|
<methods>public java.lang.String adaptSuggestion(java.lang.String) ,public List<org.languagetool.rules.RuleMatch> adaptSuggestions(List<org.languagetool.rules.RuleMatch>, Set<java.lang.String>) ,public org.languagetool.rules.RuleMatch adjustMatch(org.languagetool.rules.RuleMatch, List<java.lang.String>) ,public org.languagetool.chunking.Chunker createDefaultChunker() ,public org.languagetool.tagging.disambiguation.Disambiguator createDefaultDisambiguator() ,public org.languagetool.JLanguageTool createDefaultJLanguageTool() ,public org.languagetool.chunking.Chunker createDefaultPostDisambiguationChunker() ,public org.languagetool.tokenizers.SentenceTokenizer createDefaultSentenceTokenizer() ,public org.languagetool.synthesis.Synthesizer createDefaultSynthesizer() ,public org.languagetool.tagging.Tagger createDefaultTagger() ,public org.languagetool.tokenizers.Tokenizer createDefaultWordTokenizer() ,public boolean equals(java.lang.Object) ,public boolean equalsConsiderVariantsIfSpecified(org.languagetool.Language) ,public synchronized org.languagetool.chunking.Chunker getChunker() ,public java.lang.String getClosingDoubleQuote() ,public java.lang.String getClosingSingleQuote() ,public java.lang.String getCommonWordsPath() ,public java.lang.String getConsistencyRulePrefix() ,public abstract java.lang.String[] getCountries() ,public List<java.lang.String> getDefaultDisabledRulesForVariant() ,public List<java.lang.String> getDefaultEnabledRulesForVariant() ,public org.languagetool.Language getDefaultLanguageVariant() ,public org.languagetool.rules.spelling.SpellingCheckRule getDefaultSpellingRule() ,public org.languagetool.rules.spelling.SpellingCheckRule getDefaultSpellingRule(java.util.ResourceBundle) ,public org.languagetool.rules.patterns.Unifier getDisambiguationUnifier() ,public org.languagetool.rules.patterns.UnifierConfiguration getDisambiguationUnifierConfiguration() ,public synchronized org.languagetool.tagging.disambiguation.Disambiguator getDisambiguator() 
,public java.util.regex.Pattern getIgnoredCharactersRegex() ,public org.languagetool.languagemodel.LanguageModel getLanguageModel(java.io.File) throws java.io.IOException,public java.util.Locale getLocale() ,public java.util.Locale getLocaleWithCountryAndVariant() ,public org.languagetool.LanguageMaintainedState getMaintainedState() ,public abstract org.languagetool.language.Contributor[] getMaintainers() ,public org.languagetool.rules.spelling.multitoken.MultitokenSpeller getMultitokenSpeller() ,public abstract java.lang.String getName() ,public java.lang.String getOpeningDoubleQuote() ,public java.lang.String getOpeningSingleQuote() ,public synchronized org.languagetool.chunking.Chunker getPostDisambiguationChunker() ,public Map<java.lang.String,java.lang.Integer> getPriorityMap() ,public List<org.languagetool.rules.Rule> getRelevantLanguageModelCapableRules(java.util.ResourceBundle, org.languagetool.languagemodel.LanguageModel, org.languagetool.GlobalConfig, org.languagetool.UserConfig, org.languagetool.Language, List<org.languagetool.Language>) throws java.io.IOException,public List<org.languagetool.rules.Rule> getRelevantLanguageModelRules(java.util.ResourceBundle, org.languagetool.languagemodel.LanguageModel, org.languagetool.UserConfig) throws java.io.IOException,public List<org.languagetool.rules.Rule> getRelevantRemoteRules(java.util.ResourceBundle, List<org.languagetool.rules.RemoteRuleConfig>, org.languagetool.GlobalConfig, org.languagetool.UserConfig, org.languagetool.Language, List<org.languagetool.Language>, boolean) throws java.io.IOException,public abstract List<org.languagetool.rules.Rule> getRelevantRules(java.util.ResourceBundle, org.languagetool.UserConfig, org.languagetool.Language, List<org.languagetool.Language>) throws java.io.IOException,public List<org.languagetool.rules.Rule> getRelevantRulesGlobalConfig(java.util.ResourceBundle, org.languagetool.GlobalConfig, org.languagetool.UserConfig, org.languagetool.Language, 
List<org.languagetool.Language>) throws java.io.IOException,public Function<org.languagetool.rules.Rule,org.languagetool.rules.Rule> getRemoteEnhancedRules(java.util.ResourceBundle, List<org.languagetool.rules.RemoteRuleConfig>, org.languagetool.UserConfig, org.languagetool.Language, List<org.languagetool.Language>, boolean) throws java.io.IOException,public List<java.lang.String> getRuleFileNames() ,public int getRulePriority(org.languagetool.rules.Rule) ,public synchronized org.languagetool.tokenizers.SentenceTokenizer getSentenceTokenizer() ,public abstract java.lang.String getShortCode() ,public final java.lang.String getShortCodeWithCountryAndVariant() ,public synchronized org.languagetool.synthesis.Synthesizer getSynthesizer() ,public synchronized org.languagetool.tagging.Tagger getTagger() ,public final java.lang.String getTranslatedName(java.util.ResourceBundle) ,public org.languagetool.rules.patterns.Unifier getUnifier() ,public org.languagetool.rules.patterns.UnifierConfiguration getUnifierConfiguration() ,public java.lang.String getVariant() ,public synchronized org.languagetool.tokenizers.Tokenizer getWordTokenizer() ,public boolean hasMinMatchesRules() ,public boolean hasNGramFalseFriendRule(org.languagetool.Language) ,public final boolean hasVariant() ,public int hashCode() ,public boolean isAdvancedTypographyEnabled() ,public boolean isExternal() ,public boolean isHiddenFromGui() ,public boolean isSpellcheckOnlyLanguage() ,public boolean isVariant() ,public List<org.languagetool.rules.RuleMatch> mergeSuggestions(List<org.languagetool.rules.RuleMatch>, org.languagetool.markup.AnnotatedText, Set<java.lang.String>) ,public List<java.lang.String> prepareLineForSpeller(java.lang.String) ,public void setChunker(org.languagetool.chunking.Chunker) ,public void setDisambiguator(org.languagetool.tagging.disambiguation.Disambiguator) ,public void setPostDisambiguationChunker(org.languagetool.chunking.Chunker) ,public void 
setSentenceTokenizer(org.languagetool.tokenizers.SentenceTokenizer) ,public void setSynthesizer(org.languagetool.synthesis.Synthesizer) ,public void setTagger(org.languagetool.tagging.Tagger) ,public void setWordTokenizer(org.languagetool.tokenizers.Tokenizer) ,public java.lang.String toAdvancedTypography(java.lang.String) ,public final java.lang.String toString() <variables>private static final java.util.regex.Pattern APOSTROPHE,private static final org.languagetool.tagging.disambiguation.Disambiguator DEMO_DISAMBIGUATOR,private static final org.languagetool.tagging.Tagger DEMO_TAGGER,private static final java.util.regex.Pattern DOUBLE_QUOTE_PATTERN,private static final java.util.regex.Pattern ELLIPSIS,private static final java.util.regex.Pattern INSIDE_SUGGESTION,private static final java.util.regex.Pattern NBSPACE1,private static final java.util.regex.Pattern NBSPACE2,private static final java.util.regex.Pattern QUOTED_CHAR_PATTERN,private static final org.languagetool.tokenizers.SentenceTokenizer SENTENCE_TOKENIZER,private static final java.util.regex.Pattern SINGLE_QUOTE_PATTERN,private static final java.util.regex.Pattern SUGGESTION_CLOSE_TAG,private static final java.util.regex.Pattern SUGGESTION_OPEN_TAG,private static final java.util.regex.Pattern TYPOGRAPHY_PATTERN_1,private static final java.util.regex.Pattern TYPOGRAPHY_PATTERN_2,private static final java.util.regex.Pattern TYPOGRAPHY_PATTERN_3,private static final java.util.regex.Pattern TYPOGRAPHY_PATTERN_4,private static final java.util.regex.Pattern TYPOGRAPHY_PATTERN_5,private static final org.languagetool.tokenizers.WordTokenizer WORD_TOKENIZER,private org.languagetool.chunking.Chunker chunker,private final org.languagetool.rules.patterns.UnifierConfiguration disambiguationUnifierConfig,private org.languagetool.tagging.disambiguation.Disambiguator disambiguator,private final java.util.regex.Pattern ignoredCharactersRegex,private static final 
Map<Class<org.languagetool.Language>,org.languagetool.JLanguageTool> languagetoolInstances,private static final Logger logger,private final java.util.concurrent.atomic.AtomicBoolean noLmWarningPrinted,private List<org.languagetool.rules.patterns.AbstractPatternRule> patternRules,private org.languagetool.chunking.Chunker postDisambiguationChunker,private org.languagetool.tokenizers.SentenceTokenizer sentenceTokenizer,private java.lang.String shortCodeWithCountryAndVariant,private static final Map<Class<? extends org.languagetool.Language>,org.languagetool.rules.spelling.SpellingCheckRule> spellingRules,private org.languagetool.synthesis.Synthesizer synthesizer,private org.languagetool.tagging.Tagger tagger,private final org.languagetool.rules.patterns.UnifierConfiguration unifierConfig,private org.languagetool.tokenizers.Tokenizer wordTokenizer
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/be/src/main/java/org/languagetool/language/Belarusian.java
|
Belarusian
|
getPriorityForId
|
class Belarusian extends Language implements AutoCloseable {
private LanguageModel languageModel;
@Override
public Pattern getIgnoredCharactersRegex() {
return Pattern.compile("[\u00AD\u0301\u0300]");
}
@Override
public String getName() {
return "Belarusian";
}
@Override
public String getShortCode() {
return "be";
}
@Override
public String[] getCountries() {
return new String[]{"BY"};
}
@NotNull
@Override
public Tagger createDefaultTagger() {
return new DemoTagger();
}
@Override
public SentenceTokenizer createDefaultSentenceTokenizer() {
return new SRXSentenceTokenizer(this);
}
@Override
public Tokenizer createDefaultWordTokenizer() {
return new BelarusianWordTokenizer();
}
@Override
public Contributor[] getMaintainers() {
return new Contributor[] { new Contributor("Alex Buloichik") };
}
/** @since 5.1 */
@Override
public String getOpeningDoubleQuote() {
return "«";
}
/** @since 5.1 */
@Override
public String getClosingDoubleQuote() {
return "»";
}
/** @since 5.1 */
@Override
public String getOpeningSingleQuote() {
return "‘";
}
/** @since 5.1 */
@Override
public String getClosingSingleQuote() {
return "’";
}
/** @since 5.1 */
@Override
public boolean isAdvancedTypographyEnabled() {
return true;
}
@Override
public List<Rule> getRelevantRules(ResourceBundle messages, UserConfig userConfig, Language motherTongue, List<Language> altLanguages) throws IOException {
return Arrays.asList(
new CommaWhitespaceRule(messages),
new DoublePunctuationRule(messages),
new MorfologikBelarusianSpellerRule(messages, this, userConfig, altLanguages),
new UppercaseSentenceStartRule(messages, this),
new MultipleWhitespaceRule(messages, this),
new SentenceWhitespaceRule(messages),
new WhiteSpaceBeforeParagraphEnd(messages, this),
new WhiteSpaceAtBeginOfParagraph(messages),
new LongSentenceRule(messages, userConfig, 50),
new LongParagraphRule(messages, this, userConfig),
new ParagraphRepeatBeginningRule(messages, this), //re-activate rule, issue #3509
// new PunctuationMarkAtParagraphEnd(messages, this),
new PunctuationMarkAtParagraphEnd2(messages, this),
new SimpleReplaceRule(messages),
new BelarusianSpecificCaseRule(messages)
);
}
/** @since 3.1 */
@Override
public synchronized LanguageModel getLanguageModel(File indexDir) throws IOException {
languageModel = initLanguageModel(indexDir, languageModel);
return languageModel;
}
/**
* Closes the language model, if any.
* @since 3.1
*/
@Override
public void close() throws Exception {
if (languageModel != null) {
languageModel.close();
}
}
@Override
protected int getPriorityForId(String id) {<FILL_FUNCTION_BODY>}
@Nullable
@Override
protected SpellingCheckRule createDefaultSpellingRule(ResourceBundle messages) throws IOException {
return new MorfologikBelarusianSpellerRule(messages, this, null, null);
}
}
|
switch (id) {
case "RUSSIAN_SIMPLE_REPLACE_RULE": return 10; // higher prio than spell checker
case "BELARUSIAN_SPECIFIC_CASE": return 9; // higher prio than spell checker
case "Word_root_repeat": return -1;
case "PUNCT_DPT_2": return -2;
case "TOO_LONG_PARAGRAPH": return -15;
}
return super.getPriorityForId(id);
| 981
| 149
| 1,130
|
<methods>public java.lang.String adaptSuggestion(java.lang.String) ,public List<org.languagetool.rules.RuleMatch> adaptSuggestions(List<org.languagetool.rules.RuleMatch>, Set<java.lang.String>) ,public org.languagetool.rules.RuleMatch adjustMatch(org.languagetool.rules.RuleMatch, List<java.lang.String>) ,public org.languagetool.chunking.Chunker createDefaultChunker() ,public org.languagetool.tagging.disambiguation.Disambiguator createDefaultDisambiguator() ,public org.languagetool.JLanguageTool createDefaultJLanguageTool() ,public org.languagetool.chunking.Chunker createDefaultPostDisambiguationChunker() ,public org.languagetool.tokenizers.SentenceTokenizer createDefaultSentenceTokenizer() ,public org.languagetool.synthesis.Synthesizer createDefaultSynthesizer() ,public org.languagetool.tagging.Tagger createDefaultTagger() ,public org.languagetool.tokenizers.Tokenizer createDefaultWordTokenizer() ,public boolean equals(java.lang.Object) ,public boolean equalsConsiderVariantsIfSpecified(org.languagetool.Language) ,public synchronized org.languagetool.chunking.Chunker getChunker() ,public java.lang.String getClosingDoubleQuote() ,public java.lang.String getClosingSingleQuote() ,public java.lang.String getCommonWordsPath() ,public java.lang.String getConsistencyRulePrefix() ,public abstract java.lang.String[] getCountries() ,public List<java.lang.String> getDefaultDisabledRulesForVariant() ,public List<java.lang.String> getDefaultEnabledRulesForVariant() ,public org.languagetool.Language getDefaultLanguageVariant() ,public org.languagetool.rules.spelling.SpellingCheckRule getDefaultSpellingRule() ,public org.languagetool.rules.spelling.SpellingCheckRule getDefaultSpellingRule(java.util.ResourceBundle) ,public org.languagetool.rules.patterns.Unifier getDisambiguationUnifier() ,public org.languagetool.rules.patterns.UnifierConfiguration getDisambiguationUnifierConfiguration() ,public synchronized org.languagetool.tagging.disambiguation.Disambiguator getDisambiguator() 
,public java.util.regex.Pattern getIgnoredCharactersRegex() ,public org.languagetool.languagemodel.LanguageModel getLanguageModel(java.io.File) throws java.io.IOException,public java.util.Locale getLocale() ,public java.util.Locale getLocaleWithCountryAndVariant() ,public org.languagetool.LanguageMaintainedState getMaintainedState() ,public abstract org.languagetool.language.Contributor[] getMaintainers() ,public org.languagetool.rules.spelling.multitoken.MultitokenSpeller getMultitokenSpeller() ,public abstract java.lang.String getName() ,public java.lang.String getOpeningDoubleQuote() ,public java.lang.String getOpeningSingleQuote() ,public synchronized org.languagetool.chunking.Chunker getPostDisambiguationChunker() ,public Map<java.lang.String,java.lang.Integer> getPriorityMap() ,public List<org.languagetool.rules.Rule> getRelevantLanguageModelCapableRules(java.util.ResourceBundle, org.languagetool.languagemodel.LanguageModel, org.languagetool.GlobalConfig, org.languagetool.UserConfig, org.languagetool.Language, List<org.languagetool.Language>) throws java.io.IOException,public List<org.languagetool.rules.Rule> getRelevantLanguageModelRules(java.util.ResourceBundle, org.languagetool.languagemodel.LanguageModel, org.languagetool.UserConfig) throws java.io.IOException,public List<org.languagetool.rules.Rule> getRelevantRemoteRules(java.util.ResourceBundle, List<org.languagetool.rules.RemoteRuleConfig>, org.languagetool.GlobalConfig, org.languagetool.UserConfig, org.languagetool.Language, List<org.languagetool.Language>, boolean) throws java.io.IOException,public abstract List<org.languagetool.rules.Rule> getRelevantRules(java.util.ResourceBundle, org.languagetool.UserConfig, org.languagetool.Language, List<org.languagetool.Language>) throws java.io.IOException,public List<org.languagetool.rules.Rule> getRelevantRulesGlobalConfig(java.util.ResourceBundle, org.languagetool.GlobalConfig, org.languagetool.UserConfig, org.languagetool.Language, 
List<org.languagetool.Language>) throws java.io.IOException,public Function<org.languagetool.rules.Rule,org.languagetool.rules.Rule> getRemoteEnhancedRules(java.util.ResourceBundle, List<org.languagetool.rules.RemoteRuleConfig>, org.languagetool.UserConfig, org.languagetool.Language, List<org.languagetool.Language>, boolean) throws java.io.IOException,public List<java.lang.String> getRuleFileNames() ,public int getRulePriority(org.languagetool.rules.Rule) ,public synchronized org.languagetool.tokenizers.SentenceTokenizer getSentenceTokenizer() ,public abstract java.lang.String getShortCode() ,public final java.lang.String getShortCodeWithCountryAndVariant() ,public synchronized org.languagetool.synthesis.Synthesizer getSynthesizer() ,public synchronized org.languagetool.tagging.Tagger getTagger() ,public final java.lang.String getTranslatedName(java.util.ResourceBundle) ,public org.languagetool.rules.patterns.Unifier getUnifier() ,public org.languagetool.rules.patterns.UnifierConfiguration getUnifierConfiguration() ,public java.lang.String getVariant() ,public synchronized org.languagetool.tokenizers.Tokenizer getWordTokenizer() ,public boolean hasMinMatchesRules() ,public boolean hasNGramFalseFriendRule(org.languagetool.Language) ,public final boolean hasVariant() ,public int hashCode() ,public boolean isAdvancedTypographyEnabled() ,public boolean isExternal() ,public boolean isHiddenFromGui() ,public boolean isSpellcheckOnlyLanguage() ,public boolean isVariant() ,public List<org.languagetool.rules.RuleMatch> mergeSuggestions(List<org.languagetool.rules.RuleMatch>, org.languagetool.markup.AnnotatedText, Set<java.lang.String>) ,public List<java.lang.String> prepareLineForSpeller(java.lang.String) ,public void setChunker(org.languagetool.chunking.Chunker) ,public void setDisambiguator(org.languagetool.tagging.disambiguation.Disambiguator) ,public void setPostDisambiguationChunker(org.languagetool.chunking.Chunker) ,public void 
setSentenceTokenizer(org.languagetool.tokenizers.SentenceTokenizer) ,public void setSynthesizer(org.languagetool.synthesis.Synthesizer) ,public void setTagger(org.languagetool.tagging.Tagger) ,public void setWordTokenizer(org.languagetool.tokenizers.Tokenizer) ,public java.lang.String toAdvancedTypography(java.lang.String) ,public final java.lang.String toString() <variables>private static final java.util.regex.Pattern APOSTROPHE,private static final org.languagetool.tagging.disambiguation.Disambiguator DEMO_DISAMBIGUATOR,private static final org.languagetool.tagging.Tagger DEMO_TAGGER,private static final java.util.regex.Pattern DOUBLE_QUOTE_PATTERN,private static final java.util.regex.Pattern ELLIPSIS,private static final java.util.regex.Pattern INSIDE_SUGGESTION,private static final java.util.regex.Pattern NBSPACE1,private static final java.util.regex.Pattern NBSPACE2,private static final java.util.regex.Pattern QUOTED_CHAR_PATTERN,private static final org.languagetool.tokenizers.SentenceTokenizer SENTENCE_TOKENIZER,private static final java.util.regex.Pattern SINGLE_QUOTE_PATTERN,private static final java.util.regex.Pattern SUGGESTION_CLOSE_TAG,private static final java.util.regex.Pattern SUGGESTION_OPEN_TAG,private static final java.util.regex.Pattern TYPOGRAPHY_PATTERN_1,private static final java.util.regex.Pattern TYPOGRAPHY_PATTERN_2,private static final java.util.regex.Pattern TYPOGRAPHY_PATTERN_3,private static final java.util.regex.Pattern TYPOGRAPHY_PATTERN_4,private static final java.util.regex.Pattern TYPOGRAPHY_PATTERN_5,private static final org.languagetool.tokenizers.WordTokenizer WORD_TOKENIZER,private org.languagetool.chunking.Chunker chunker,private final org.languagetool.rules.patterns.UnifierConfiguration disambiguationUnifierConfig,private org.languagetool.tagging.disambiguation.Disambiguator disambiguator,private final java.util.regex.Pattern ignoredCharactersRegex,private static final 
Map<Class<org.languagetool.Language>,org.languagetool.JLanguageTool> languagetoolInstances,private static final Logger logger,private final java.util.concurrent.atomic.AtomicBoolean noLmWarningPrinted,private List<org.languagetool.rules.patterns.AbstractPatternRule> patternRules,private org.languagetool.chunking.Chunker postDisambiguationChunker,private org.languagetool.tokenizers.SentenceTokenizer sentenceTokenizer,private java.lang.String shortCodeWithCountryAndVariant,private static final Map<Class<? extends org.languagetool.Language>,org.languagetool.rules.spelling.SpellingCheckRule> spellingRules,private org.languagetool.synthesis.Synthesizer synthesizer,private org.languagetool.tagging.Tagger tagger,private final org.languagetool.rules.patterns.UnifierConfiguration unifierConfig,private org.languagetool.tokenizers.Tokenizer wordTokenizer
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/be/src/main/java/org/languagetool/tokenizers/be/BelarusianWordTokenizer.java
|
BelarusianWordTokenizer
|
tokenize
|
class BelarusianWordTokenizer extends WordTokenizer {
private final String tokenizingCharacters;
public BelarusianWordTokenizer() {
tokenizingCharacters = super.getTokenizingCharacters().
replace("'", "").
replace("’", "").
replace("ʼ", "");
}
@Override
public String getTokenizingCharacters() {
return tokenizingCharacters;
}
@Override
public List<String> tokenize(String text) {<FILL_FUNCTION_BODY>}
}
|
List<String> l = new ArrayList<>();
StringTokenizer st = new StringTokenizer(text, getTokenizingCharacters(), true);
while (st.hasMoreElements()) {
l.add(st.nextToken());
}
List<String> outputlist = new ArrayList<>();
for (String token : joinEMailsAndUrls(l)) {
if (token.length() > 1) {
outputlist.add(token.replace('’', '\''));
} else
outputlist.add(token);
}
return outputlist;
| 140
| 142
| 282
|
<methods>public non-sealed void <init>() ,public static List<java.lang.String> getProtocols() ,public java.lang.String getTokenizingCharacters() ,public boolean isCurrencyExpression(java.lang.String) ,public static boolean isEMail(java.lang.String) ,public static boolean isUrl(java.lang.String) ,public List<java.lang.String> replaceEmojis(java.lang.String) ,public List<java.lang.String> restoreEmojis(List<java.lang.String>, List<java.lang.String>) ,public List<java.lang.String> splitCurrencyExpression(java.lang.String) ,public List<java.lang.String> tokenize(java.lang.String) <variables>private static final java.util.regex.Pattern CURRENCY_EXPRESSION,private static final java.util.regex.Pattern CURRENCY_SYMBOLS,private static final java.util.regex.Pattern CURRENCY_VALUE,private static final java.util.regex.Pattern DOMAIN_CHARS,private static final java.util.regex.Pattern E_MAIL,private static final java.util.regex.Pattern NO_PROTOCOL_URL,private static final List<java.lang.String> PROTOCOLS,protected final java.lang.String REMOVED_EMOJI,private static final java.lang.String TOKENIZING_CHARACTERS,private static final java.util.regex.Pattern URL_CHARS
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/br/src/main/java/org/languagetool/language/Breton.java
|
Breton
|
getRelevantRules
|
class Breton extends Language {
@Override
public SentenceTokenizer createDefaultSentenceTokenizer() {
return new SRXSentenceTokenizer(this);
}
@Override
public Tokenizer createDefaultWordTokenizer() {
return new BretonWordTokenizer();
}
@Override
public String getName() {
return "Breton";
}
@Override
public String getShortCode() {
return "br";
}
@Override
public String[] getCountries() {
return new String[] {"FR"};
}
@NotNull
@Override
public Tagger createDefaultTagger() {
return new BretonTagger();
}
@Override
public Disambiguator createDefaultDisambiguator() {
return new XmlRuleDisambiguator(this);
}
@Override
public Contributor[] getMaintainers() {
return new Contributor[] {
Contributors.DOMINIQUE_PELLE, new Contributor("Fulup Jakez")
};
}
@Override
public List<Rule> getRelevantRules(ResourceBundle messages, UserConfig userConfig, Language motherTongue, List<Language> altLanguages) throws IOException {<FILL_FUNCTION_BODY>}
@Override
public LanguageMaintainedState getMaintainedState() {
return LanguageMaintainedState.ActivelyMaintained;
}
@Nullable
@Override
protected SpellingCheckRule createDefaultSpellingRule(ResourceBundle messages) throws IOException {
return new MorfologikBretonSpellerRule(messages, this, null, null);
}
}
|
return Arrays.asList(
new CommaWhitespaceRule(messages),
new DoublePunctuationRule(messages),
new MorfologikBretonSpellerRule(messages, this, userConfig, altLanguages),
new UppercaseSentenceStartRule(messages, this),
new MultipleWhitespaceRule(messages, this),
new SentenceWhitespaceRule(messages),
new TopoReplaceRule(messages)
);
| 425
| 112
| 537
|
<methods>public java.lang.String adaptSuggestion(java.lang.String) ,public List<org.languagetool.rules.RuleMatch> adaptSuggestions(List<org.languagetool.rules.RuleMatch>, Set<java.lang.String>) ,public org.languagetool.rules.RuleMatch adjustMatch(org.languagetool.rules.RuleMatch, List<java.lang.String>) ,public org.languagetool.chunking.Chunker createDefaultChunker() ,public org.languagetool.tagging.disambiguation.Disambiguator createDefaultDisambiguator() ,public org.languagetool.JLanguageTool createDefaultJLanguageTool() ,public org.languagetool.chunking.Chunker createDefaultPostDisambiguationChunker() ,public org.languagetool.tokenizers.SentenceTokenizer createDefaultSentenceTokenizer() ,public org.languagetool.synthesis.Synthesizer createDefaultSynthesizer() ,public org.languagetool.tagging.Tagger createDefaultTagger() ,public org.languagetool.tokenizers.Tokenizer createDefaultWordTokenizer() ,public boolean equals(java.lang.Object) ,public boolean equalsConsiderVariantsIfSpecified(org.languagetool.Language) ,public synchronized org.languagetool.chunking.Chunker getChunker() ,public java.lang.String getClosingDoubleQuote() ,public java.lang.String getClosingSingleQuote() ,public java.lang.String getCommonWordsPath() ,public java.lang.String getConsistencyRulePrefix() ,public abstract java.lang.String[] getCountries() ,public List<java.lang.String> getDefaultDisabledRulesForVariant() ,public List<java.lang.String> getDefaultEnabledRulesForVariant() ,public org.languagetool.Language getDefaultLanguageVariant() ,public org.languagetool.rules.spelling.SpellingCheckRule getDefaultSpellingRule() ,public org.languagetool.rules.spelling.SpellingCheckRule getDefaultSpellingRule(java.util.ResourceBundle) ,public org.languagetool.rules.patterns.Unifier getDisambiguationUnifier() ,public org.languagetool.rules.patterns.UnifierConfiguration getDisambiguationUnifierConfiguration() ,public synchronized org.languagetool.tagging.disambiguation.Disambiguator getDisambiguator() 
,public java.util.regex.Pattern getIgnoredCharactersRegex() ,public org.languagetool.languagemodel.LanguageModel getLanguageModel(java.io.File) throws java.io.IOException,public java.util.Locale getLocale() ,public java.util.Locale getLocaleWithCountryAndVariant() ,public org.languagetool.LanguageMaintainedState getMaintainedState() ,public abstract org.languagetool.language.Contributor[] getMaintainers() ,public org.languagetool.rules.spelling.multitoken.MultitokenSpeller getMultitokenSpeller() ,public abstract java.lang.String getName() ,public java.lang.String getOpeningDoubleQuote() ,public java.lang.String getOpeningSingleQuote() ,public synchronized org.languagetool.chunking.Chunker getPostDisambiguationChunker() ,public Map<java.lang.String,java.lang.Integer> getPriorityMap() ,public List<org.languagetool.rules.Rule> getRelevantLanguageModelCapableRules(java.util.ResourceBundle, org.languagetool.languagemodel.LanguageModel, org.languagetool.GlobalConfig, org.languagetool.UserConfig, org.languagetool.Language, List<org.languagetool.Language>) throws java.io.IOException,public List<org.languagetool.rules.Rule> getRelevantLanguageModelRules(java.util.ResourceBundle, org.languagetool.languagemodel.LanguageModel, org.languagetool.UserConfig) throws java.io.IOException,public List<org.languagetool.rules.Rule> getRelevantRemoteRules(java.util.ResourceBundle, List<org.languagetool.rules.RemoteRuleConfig>, org.languagetool.GlobalConfig, org.languagetool.UserConfig, org.languagetool.Language, List<org.languagetool.Language>, boolean) throws java.io.IOException,public abstract List<org.languagetool.rules.Rule> getRelevantRules(java.util.ResourceBundle, org.languagetool.UserConfig, org.languagetool.Language, List<org.languagetool.Language>) throws java.io.IOException,public List<org.languagetool.rules.Rule> getRelevantRulesGlobalConfig(java.util.ResourceBundle, org.languagetool.GlobalConfig, org.languagetool.UserConfig, org.languagetool.Language, 
List<org.languagetool.Language>) throws java.io.IOException,public Function<org.languagetool.rules.Rule,org.languagetool.rules.Rule> getRemoteEnhancedRules(java.util.ResourceBundle, List<org.languagetool.rules.RemoteRuleConfig>, org.languagetool.UserConfig, org.languagetool.Language, List<org.languagetool.Language>, boolean) throws java.io.IOException,public List<java.lang.String> getRuleFileNames() ,public int getRulePriority(org.languagetool.rules.Rule) ,public synchronized org.languagetool.tokenizers.SentenceTokenizer getSentenceTokenizer() ,public abstract java.lang.String getShortCode() ,public final java.lang.String getShortCodeWithCountryAndVariant() ,public synchronized org.languagetool.synthesis.Synthesizer getSynthesizer() ,public synchronized org.languagetool.tagging.Tagger getTagger() ,public final java.lang.String getTranslatedName(java.util.ResourceBundle) ,public org.languagetool.rules.patterns.Unifier getUnifier() ,public org.languagetool.rules.patterns.UnifierConfiguration getUnifierConfiguration() ,public java.lang.String getVariant() ,public synchronized org.languagetool.tokenizers.Tokenizer getWordTokenizer() ,public boolean hasMinMatchesRules() ,public boolean hasNGramFalseFriendRule(org.languagetool.Language) ,public final boolean hasVariant() ,public int hashCode() ,public boolean isAdvancedTypographyEnabled() ,public boolean isExternal() ,public boolean isHiddenFromGui() ,public boolean isSpellcheckOnlyLanguage() ,public boolean isVariant() ,public List<org.languagetool.rules.RuleMatch> mergeSuggestions(List<org.languagetool.rules.RuleMatch>, org.languagetool.markup.AnnotatedText, Set<java.lang.String>) ,public List<java.lang.String> prepareLineForSpeller(java.lang.String) ,public void setChunker(org.languagetool.chunking.Chunker) ,public void setDisambiguator(org.languagetool.tagging.disambiguation.Disambiguator) ,public void setPostDisambiguationChunker(org.languagetool.chunking.Chunker) ,public void 
setSentenceTokenizer(org.languagetool.tokenizers.SentenceTokenizer) ,public void setSynthesizer(org.languagetool.synthesis.Synthesizer) ,public void setTagger(org.languagetool.tagging.Tagger) ,public void setWordTokenizer(org.languagetool.tokenizers.Tokenizer) ,public java.lang.String toAdvancedTypography(java.lang.String) ,public final java.lang.String toString() <variables>private static final java.util.regex.Pattern APOSTROPHE,private static final org.languagetool.tagging.disambiguation.Disambiguator DEMO_DISAMBIGUATOR,private static final org.languagetool.tagging.Tagger DEMO_TAGGER,private static final java.util.regex.Pattern DOUBLE_QUOTE_PATTERN,private static final java.util.regex.Pattern ELLIPSIS,private static final java.util.regex.Pattern INSIDE_SUGGESTION,private static final java.util.regex.Pattern NBSPACE1,private static final java.util.regex.Pattern NBSPACE2,private static final java.util.regex.Pattern QUOTED_CHAR_PATTERN,private static final org.languagetool.tokenizers.SentenceTokenizer SENTENCE_TOKENIZER,private static final java.util.regex.Pattern SINGLE_QUOTE_PATTERN,private static final java.util.regex.Pattern SUGGESTION_CLOSE_TAG,private static final java.util.regex.Pattern SUGGESTION_OPEN_TAG,private static final java.util.regex.Pattern TYPOGRAPHY_PATTERN_1,private static final java.util.regex.Pattern TYPOGRAPHY_PATTERN_2,private static final java.util.regex.Pattern TYPOGRAPHY_PATTERN_3,private static final java.util.regex.Pattern TYPOGRAPHY_PATTERN_4,private static final java.util.regex.Pattern TYPOGRAPHY_PATTERN_5,private static final org.languagetool.tokenizers.WordTokenizer WORD_TOKENIZER,private org.languagetool.chunking.Chunker chunker,private final org.languagetool.rules.patterns.UnifierConfiguration disambiguationUnifierConfig,private org.languagetool.tagging.disambiguation.Disambiguator disambiguator,private final java.util.regex.Pattern ignoredCharactersRegex,private static final 
Map<Class<org.languagetool.Language>,org.languagetool.JLanguageTool> languagetoolInstances,private static final Logger logger,private final java.util.concurrent.atomic.AtomicBoolean noLmWarningPrinted,private List<org.languagetool.rules.patterns.AbstractPatternRule> patternRules,private org.languagetool.chunking.Chunker postDisambiguationChunker,private org.languagetool.tokenizers.SentenceTokenizer sentenceTokenizer,private java.lang.String shortCodeWithCountryAndVariant,private static final Map<Class<? extends org.languagetool.Language>,org.languagetool.rules.spelling.SpellingCheckRule> spellingRules,private org.languagetool.synthesis.Synthesizer synthesizer,private org.languagetool.tagging.Tagger tagger,private final org.languagetool.rules.patterns.UnifierConfiguration unifierConfig,private org.languagetool.tokenizers.Tokenizer wordTokenizer
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/br/src/main/java/org/languagetool/rules/br/BretonCompoundRule.java
|
BretonCompoundRule
|
getCompoundRuleData
|
class BretonCompoundRule extends AbstractCompoundRule {
private static volatile CompoundRuleData compoundData;
public BretonCompoundRule(ResourceBundle messages, Language lang, UserConfig userConfig) throws IOException {
super(messages, lang, userConfig,
"Skrivet e vez ar ger-mañ boaz gant ur varrennig-stagañ.",
"Ar ger-mañ a zo skrivet boaz evel unan hepken.",
"An droienn-mañ a zo skrivet evel ur ger hepken pe gant ur varrennig-stagañ.",
"Kudenn barrennig-stagañ");
super.setCategory(Categories.COMPOUNDING.getCategory(messages));
addExamplePair(Example.wrong("Gwelet em eus un <marker>alc'hweder gwez</marker> e-kerzh an dibenn-sizhun-mañ."),
Example.fixed("Gwelet em eus un <marker>alc'hweder-gwez</marker> e-kerzh an dibenn-sizhun-mañ."));
setLocQualityIssueType(ITSIssueType.Grammar);
}
@Override
public String getId() {
return "BR_COMPOUNDS";
}
@Override
public String getDescription() {
return "Mots composés";
}
/*
@Override
public URL getUrl() {
return Tools.getUrl("https://pt.wikipedia.org/wiki/Lista_das_alterações_previstas_pelo_acordo_ortográfico_de_1990");
}
*/
@Override
public CompoundRuleData getCompoundRuleData() {<FILL_FUNCTION_BODY>}
}
|
CompoundRuleData data = compoundData;
if (data == null) {
synchronized (BretonCompoundRule.class) {
data = compoundData;
if (data == null) {
compoundData = data = new CompoundRuleData("/br/compounds.txt");
}
}
}
return data;
| 471
| 88
| 559
|
<methods>public void <init>(java.util.ResourceBundle, org.languagetool.Language, org.languagetool.UserConfig, java.lang.String, java.lang.String, java.lang.String) throws java.io.IOException,public void <init>(java.util.ResourceBundle, org.languagetool.Language, org.languagetool.UserConfig, java.lang.String, java.lang.String, java.lang.String, java.lang.String) throws java.io.IOException,public int estimateContextForSureMatch() ,public abstract org.languagetool.rules.CompoundRuleData getCompoundRuleData() ,public abstract java.lang.String getDescription() ,public abstract java.lang.String getId() ,public boolean isMisspelled(java.lang.String) throws java.io.IOException,public org.languagetool.rules.RuleMatch[] match(org.languagetool.AnalyzedSentence) throws java.io.IOException,public java.lang.String mergeCompound(java.lang.String, boolean) ,public void useSubRuleSpecificIds() <variables>private static final java.util.regex.Pattern DASHES,private static final java.util.regex.Pattern DIGIT,static final int MAX_TERMS,private static final java.util.regex.Pattern WHITESPACE,private static final java.util.regex.Pattern WHITESPACE_DASH,protected final non-sealed org.languagetool.Language lang,protected final non-sealed org.languagetool.LinguServices linguServices,protected boolean sentenceStartsWithUpperCase,private final non-sealed java.lang.String shortDesc,protected boolean subRuleSpecificIds,private final non-sealed java.lang.String withHyphenMessage,private final non-sealed java.lang.String withOrWithoutHyphenMessage,private final non-sealed java.lang.String withoutHyphenMessage
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/br/src/main/java/org/languagetool/rules/br/DateCheckFilter.java
|
DateCheckFilter
|
getDayOfWeek
|
class DateCheckFilter extends AbstractDateCheckFilter {
@Override
protected Calendar getCalendar() {
return Calendar.getInstance(Locale.UK);
}
@SuppressWarnings("ControlFlowStatementWithoutBraces")
@Override
protected int getDayOfMonth(String dayStr) {
String day = dayStr.toLowerCase();
if (day.charAt(0) == 't') day = 'd' + day.substring(1);
if (day.charAt(0) == 'p') day = 'b' + day.substring(1);
if (day.endsWith("vet")) {
// Removing final vet if any.
day = day.substring(0, day.length() - 3);
}
if (day.equals("c’hentañ") || day.equals("unan")) return 1;
if (day.equals("daou") || day.equals("eil")) return 2;
if (day.equals("dri") || day.equals("drede") || day.equals("deir")) return 3;
if (day.equals("bevar")) return 4;
// bemp or bemvet (vet has been removed).
if (day.equals("bemp") || day.equals("bem")) return 5;
if (day.equals("c’hwerc’h")) return 6;
if (day.equals("seizh")) return 7;
if (day.equals("eizh")) return 8;
// nav and navet (vet has been removed).
if (day.equals("nav") || day.equals("na")) return 9;
if (day.equals("dek")) return 10;
if (day.equals("unnek")) return 11;
if (day.equals("daouzek")) return 12;
if (day.equals("drizek")) return 13;
if (day.equals("bevarzek")) return 14;
if (day.equals("bemzek")) return 15;
if (day.equals("c’hwezek")) return 16;
if (day.equals("seitek")) return 17;
if (day.equals("driwec’h")) return 18;
if (day.equals("naontek")) return 19;
if (day.equals("ugent")) return 20;
if (day.equals("dregont")) return 30;
return 0;
}
@SuppressWarnings("ControlFlowStatementWithoutBraces")
@Override
protected int getDayOfWeek(String dayStr) {<FILL_FUNCTION_BODY>}
@SuppressWarnings("ControlFlowStatementWithoutBraces")
@Override
protected String getDayOfWeek(Calendar date) {
String englishDay = date.getDisplayName(Calendar.DAY_OF_WEEK, Calendar.LONG, Locale.UK);
if (englishDay.equals("Sunday")) return "Sul";
if (englishDay.equals("Monday")) return "Lun";
if (englishDay.equals("Tuesday")) return "Meurzh";
if (englishDay.equals("Wednesday")) return "Merc’her";
if (englishDay.equals("Thursday")) return "Yaou";
if (englishDay.equals("Friday")) return "Gwener";
if (englishDay.equals("Saturday")) return "Sadorn";
return "";
}
@SuppressWarnings({"ControlFlowStatementWithoutBraces", "MagicNumber"})
@Override
protected int getMonth(String monthStr) {
String mon = monthStr.toLowerCase();
if (mon.equals("genver")) return 1;
if (mon.equals("c’hwevrer")) return 2;
if (mon.equals("meurzh")) return 3;
if (mon.equals("ebrel")) return 4;
if (mon.equals("mae")) return 5;
if (mon.equals("mezheven") || mon.equals("even")) return 6;
if (mon.equals("gouere") || mon.equals("gouhere")) return 7;
if (mon.equals("eost")) return 8;
if (mon.equals("gwengolo")) return 9;
if (mon.equals("here")) return 10;
if (mon.equals("du")) return 11;
if (mon.equals("kerzu")) return 12;
throw new RuntimeException("Could not find month '" + monthStr + "'");
}
}
|
String day = dayStr.toLowerCase();
if (day.endsWith("sul")) return Calendar.SUNDAY;
if (day.endsWith("lun")) return Calendar.MONDAY;
if (day.endsWith("meurzh")) return Calendar.TUESDAY;
if (day.endsWith("merc’her")) return Calendar.WEDNESDAY;
if (day.equals ("yaou")) return Calendar.THURSDAY;
if (day.equals ("diriaou")) return Calendar.THURSDAY;
if (day.endsWith("gwener")) return Calendar.FRIDAY;
if (day.endsWith("sadorn")) return Calendar.SATURDAY;
throw new RuntimeException("Could not find day of week for '" + dayStr + "'");
| 1,208
| 210
| 1,418
|
<methods>public non-sealed void <init>() ,public org.languagetool.rules.RuleMatch acceptRuleMatch(org.languagetool.rules.RuleMatch, Map<java.lang.String,java.lang.String>, int, org.languagetool.AnalyzedTokenReadings[], List<java.lang.Integer>) <variables>protected static final java.util.regex.Pattern DAY_OF_MONTH_PATTERN,private static final Logger logger
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/br/src/main/java/org/languagetool/rules/br/TopoReplaceRule.java
|
TopoReplaceRule
|
addToQueue
|
class TopoReplaceRule extends Rule {
public static final String BRETON_TOPO = "BR_TOPO";
private static final String FILE_NAME = "/br/topo.txt";
private static final String FILE_ENCODING = "utf-8";
// locale used on case-conversion
private static final Locale BR_LOCALE = new Locale("br");
// list of maps containing error-corrections pairs.
// the n-th map contains key strings of (n+1) words
private static final List<Map<String, String>> wrongWords = loadWords(JLanguageTool.getDataBroker().getFromRulesDirAsStream(FILE_NAME));
public TopoReplaceRule(ResourceBundle messages) {
super(messages);
super.setCategory(Categories.MISC.getCategory(messages));
}
@Override
public final String getId() {
return BRETON_TOPO;
}
@Override
public String getDescription() {
return "anvioù-lec’h e brezhoneg";
}
public String getShort() {
return "anvioù lec’h";
}
public String getSuggestion() {
return " zo un anv lec’h gallek. Ha fellout a rae deoc’h skrivañ ";
}
/**
* @return the word used to separate multiple suggestions; used only before last suggestion, the rest are comma-separated.
*/
public String getSuggestionsSeparator() {
return " pe ";
}
public boolean isCaseSensitive() {
return true;
}
/**
* locale used on case-conversion
*/
public Locale getLocale() {
return BR_LOCALE;
}
/**
* @return the list of wrong words for which this rule can suggest correction. The list cannot be modified.
*/
public List<Map<String, String>> getWrongWords() {
return wrongWords;
}
/**
* Load the list of words. Same as {@link AbstractSimpleReplaceRule#loadFromPath} but allows multiple words.
* @param stream the stream to load.
* @return the list of maps containing the error-corrections pairs. The n-th map contains key strings of (n+1) words.
*/
private static List<Map<String, String>> loadWords(InputStream stream) {
List<Map<String, String>> list = new ArrayList<>();
try (
InputStreamReader isr = new InputStreamReader(stream, FILE_ENCODING);
BufferedReader br = new BufferedReader(isr);
) {
String line;
Tokenizer wordTokenizer = new Breton().getWordTokenizer();
while ((line = br.readLine()) != null) {
line = line.trim();
if (line.isEmpty() || line.charAt(0) == '#') { // ignore comments
continue;
}
String[] parts = line.split("=");
if (parts.length != 2) {
throw new IOException("Format error in file "
+ JLanguageTool.getDataBroker().getFromRulesDirAsUrl(FILE_NAME)
+ ", line: " + line);
}
String[] wrongForms = parts[0].split("\\|"); // multiple incorrect forms
for (String wrongForm : wrongForms) {
int wordCount = 0;
List<String> tokens = wordTokenizer.tokenize(wrongForm);
for (String token : tokens) {
if (!StringTools.isWhitespace(token)) {
wordCount++;
}
}
// grow if necessary
for (int i = list.size(); i < wordCount; i++) {
list.add(new HashMap<>());
}
list.get(wordCount - 1).put(wrongForm, parts[1]);
}
}
} catch (IOException e) {
throw new RuntimeException(e);
}
// seal the result (prevent modification from outside this class)
List<Map<String,String>> result = new ArrayList<>();
for (Map<String, String> map : list) {
result.add(Collections.unmodifiableMap(map));
}
return Collections.unmodifiableList(result);
}
private void addToQueue(AnalyzedTokenReadings token,
Queue<AnalyzedTokenReadings> prevTokens) {<FILL_FUNCTION_BODY>}
@Override
public RuleMatch[] match(AnalyzedSentence sentence) {
List<RuleMatch> ruleMatches = new ArrayList<>();
AnalyzedTokenReadings[] tokens = sentence.getTokensWithoutWhitespace();
Queue<AnalyzedTokenReadings> prevTokens = new ArrayBlockingQueue<>(wrongWords.size());
for (int i = 1; i < tokens.length; i++) {
addToQueue(tokens[i], prevTokens);
StringBuilder sb = new StringBuilder();
List<String> variants = new ArrayList<>();
List<AnalyzedTokenReadings> prevTokensList = new ArrayList<>(prevTokens);
for (int j = prevTokensList.size() - 1; j >= 0; j--) {
if (j != prevTokensList.size() - 1 && prevTokensList.get(j + 1).isWhitespaceBefore()) {
sb.insert(0, " ");
}
sb.insert(0, prevTokensList.get(j).getToken());
variants.add(0, sb.toString());
}
int len = variants.size(); // prevTokensList and variants have now the same length
for (int j = 0; j < len; j++) { // longest words first
int crtWordCount = len - j;
if (prevTokensList.get(len - crtWordCount).isImmunized()) {
continue;
}
String crt = variants.get(j);
String crtMatch = isCaseSensitive()
? wrongWords.get(crtWordCount - 1).get(crt)
: wrongWords.get(crtWordCount- 1).get(crt.toLowerCase(getLocale()));
if (crtMatch != null) {
List<String> replacements = Arrays.asList(crtMatch.split("\\|"));
String msg = crt + getSuggestion();
for (int k = 0; k < replacements.size(); k++) {
if (k > 0) {
msg = msg + (k == replacements.size() - 1 ? getSuggestionsSeparator(): ", ");
}
msg += "<suggestion>" + replacements.get(k) + "</suggestion>";
}
msg += "?";
int startPos = prevTokensList.get(len - crtWordCount).getStartPos();
int endPos = prevTokensList.get(len - 1).getEndPos();
RuleMatch potentialRuleMatch = new RuleMatch(this, sentence, startPos, endPos, msg, getShort());
if (!isCaseSensitive() && StringTools.startsWithUppercase(crt)) {
for (int k = 0; k < replacements.size(); k++) {
replacements.set(k, StringTools.uppercaseFirstChar(replacements.get(k)));
}
}
potentialRuleMatch.setSuggestedReplacements(replacements);
ruleMatches.add(potentialRuleMatch);
break;
}
}
}
return toRuleMatchArray(ruleMatches);
}
}
|
boolean inserted = prevTokens.offer(token);
if (!inserted) {
prevTokens.poll();
prevTokens.offer(token);
}
| 1,945
| 48
| 1,993
|
<methods>public void <init>() ,public void <init>(java.util.ResourceBundle) ,public void addTags(List<java.lang.String>) ,public void addToneTags(List<java.lang.String>) ,public int estimateContextForSureMatch() ,public List<org.languagetool.tagging.disambiguation.rules.DisambiguationPatternRule> getAntiPatterns() ,public org.languagetool.rules.Category getCategory() ,public java.lang.String getConfigureText() ,public final List<org.languagetool.rules.CorrectExample> getCorrectExamples() ,public int getDefaultValue() ,public abstract java.lang.String getDescription() ,public int getDistanceTokens() ,public final List<org.languagetool.rules.ErrorTriggeringExample> getErrorTriggeringExamples() ,public java.lang.String getFullId() ,public abstract java.lang.String getId() ,public final List<org.languagetool.rules.IncorrectExample> getIncorrectExamples() ,public org.languagetool.rules.ITSIssueType getLocQualityIssueType() ,public int getMaxConfigurableValue() ,public int getMinConfigurableValue() ,public int getMinPrevMatches() ,public int getPriority() ,public java.lang.String getSourceFile() ,public java.lang.String getSubId() ,public List<org.languagetool.Tag> getTags() ,public List<org.languagetool.ToneTag> getToneTags() ,public java.net.URL getUrl() ,public boolean hasConfigurableValue() ,public boolean hasTag(org.languagetool.Tag) ,public boolean hasToneTag(org.languagetool.ToneTag) ,public final boolean isDefaultOff() ,public final boolean isDefaultTempOff() ,public boolean isDictionaryBasedSpellingRule() ,public boolean isGoalSpecific() ,public final boolean isOfficeDefaultOff() ,public final boolean isOfficeDefaultOn() ,public boolean isPremium() ,public abstract org.languagetool.rules.RuleMatch[] match(org.languagetool.AnalyzedSentence) throws java.io.IOException,public final void setCategory(org.languagetool.rules.Category) ,public final void setCorrectExamples(List<org.languagetool.rules.CorrectExample>) ,public final void setDefaultOff() ,public final void 
setDefaultOn() ,public final void setDefaultTempOff() ,public void setDistanceTokens(int) ,public final void setErrorTriggeringExamples(List<org.languagetool.rules.ErrorTriggeringExample>) ,public void setGoalSpecific(boolean) ,public final void setIncorrectExamples(List<org.languagetool.rules.IncorrectExample>) ,public void setLocQualityIssueType(org.languagetool.rules.ITSIssueType) ,public void setMinPrevMatches(int) ,public final void setOfficeDefaultOff() ,public final void setOfficeDefaultOn() ,public void setPremium(boolean) ,public void setPriority(int) ,public void setTags(List<org.languagetool.Tag>) ,public void setToneTags(List<org.languagetool.ToneTag>) ,public void setUrl(java.net.URL) ,public boolean supportsLanguage(org.languagetool.Language) ,public boolean useInOffice() <variables>private static final org.languagetool.rules.Category MISC,private org.languagetool.rules.Category category,private List<org.languagetool.rules.CorrectExample> correctExamples,private boolean defaultOff,private boolean defaultTempOff,private int distanceTokens,private List<org.languagetool.rules.ErrorTriggeringExample> errorTriggeringExamples,private List<org.languagetool.rules.IncorrectExample> incorrectExamples,private boolean isGoalSpecific,private boolean isPremium,private org.languagetool.rules.ITSIssueType locQualityIssueType,protected final non-sealed java.util.ResourceBundle messages,private int minPrevMatches,private boolean officeDefaultOff,private boolean officeDefaultOn,private int priority,private List<org.languagetool.Tag> tags,private List<org.languagetool.ToneTag> toneTags,private java.net.URL url
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/br/src/main/java/org/languagetool/tagging/br/BretonTagger.java
|
BretonTagger
|
tag
|
class BretonTagger extends BaseTagger {
private static final Pattern patternSuffix = Pattern.compile("(?iu)(..+)-(mañ|se|hont)$");
private final Locale conversionLocale = Locale.getDefault();
public BretonTagger() {
super("/br/breton.dict", new Locale("br"));
}
// This method is almost the same as the 'tag' method in
// BaseTagger class, except that when probing the
// dictionary fails, it retry without the suffixes
// -mañ, -se, -hont.
@Override
public List<AnalyzedTokenReadings> tag(List<String> sentenceTokens)
throws IOException {<FILL_FUNCTION_BODY>}
private void addTokens(List<AnalyzedToken> taggedTokens, List<AnalyzedToken> l) {
if (taggedTokens != null) {
for (AnalyzedToken at : taggedTokens) {
l.add(at);
}
}
}
}
|
List<AnalyzedToken> taggerTokens;
List<AnalyzedToken> lowerTaggerTokens;
List<AnalyzedToken> upperTaggerTokens;
List<AnalyzedTokenReadings> tokenReadings = new ArrayList<>();
int pos = 0;
Matcher matcher;
for (String word : sentenceTokens) {
String probeWord = word;
if (probeWord.length() > 50) {
// avoid excessively long computation times for long (probably artificial) tokens:
List<AnalyzedToken> l = new ArrayList<>();
l.add(new AnalyzedToken(word, null, null));
tokenReadings.add(new AnalyzedTokenReadings(l, pos));
pos += word.length();
continue;
}
// This loop happens when we need to retry probing the dictionary
// which happens rarely when trying to remove suffixes -mañ, -se, etc.
for (;;) {
List<AnalyzedToken> l = new ArrayList<>();
String lowerWord = probeWord.toLowerCase(conversionLocale);
taggerTokens = asAnalyzedTokenListForTaggedWords(word, getWordTagger().tag(probeWord));
lowerTaggerTokens = asAnalyzedTokenListForTaggedWords(word, getWordTagger().tag(lowerWord));
boolean isLowercase = probeWord.equals(lowerWord);
// Normal case.
addTokens(taggerTokens, l);
if (!isLowercase) {
// Lowercase.
addTokens(lowerTaggerTokens, l);
}
// Uppercase.
if (lowerTaggerTokens.isEmpty() && taggerTokens.isEmpty()) {
if (isLowercase) {
upperTaggerTokens = asAnalyzedTokenListForTaggedWords(word,
getWordTagger().tag(StringTools.uppercaseFirstChar(probeWord)));
if (!upperTaggerTokens.isEmpty()) {
addTokens(upperTaggerTokens, l);
}
}
if (l.isEmpty()) {
if ((matcher = patternSuffix.matcher(probeWord)).find()) {
// Remove the suffix and probe dictionary again.
// So given a word such as "xxx-mañ", we're going to
// try to probe the dictionary again with "xxx" this time.
probeWord = matcher.group(1);
continue;
}
l.add(new AnalyzedToken(word, null, null));
}
}
tokenReadings.add(new AnalyzedTokenReadings(l, pos));
pos += word.length();
break;
}
}
return tokenReadings;
| 268
| 674
| 942
|
<methods>public void <init>(java.lang.String, java.util.Locale) ,public void <init>(java.lang.String, java.util.Locale, boolean) ,public void <init>(java.lang.String, java.util.Locale, boolean, boolean) ,public final org.languagetool.AnalyzedTokenReadings createNullToken(java.lang.String, int) ,public org.languagetool.AnalyzedToken createToken(java.lang.String, java.lang.String) ,public java.lang.String getDictionaryPath() ,public List<java.lang.String> getManualAdditionsFileNames() ,public List<java.lang.String> getManualRemovalsFileNames() ,public boolean overwriteWithManualTagger() ,public List<org.languagetool.AnalyzedTokenReadings> tag(List<java.lang.String>) throws java.io.IOException<variables>private static final java.lang.String CUSTOM_MANUAL_ADDITIONS_FILE,private static final java.lang.String CUSTOM_MANUAL_REMOVALS_FILE,private static final java.lang.String MANUAL_ADDITIONS_FILE,private static final java.lang.String MANUAL_REMOVALS_FILE,private final non-sealed Dictionary#RAW dictionary,private final non-sealed java.lang.String dictionaryPath,protected final non-sealed java.util.Locale locale,private final non-sealed boolean tagLowercaseWithUppercase,protected final non-sealed org.languagetool.tagging.WordTagger wordTagger
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/br/src/main/java/org/languagetool/tokenizers/br/BretonWordTokenizer.java
|
BretonWordTokenizer
|
tokenize
|
class BretonWordTokenizer extends WordTokenizer {
private static final Pattern REPL_PATTERN_1 = compile("([Cc])['’‘ʼ]([Hh])");
private static final Pattern REPL_PATTERN_2 = compile("(\\p{L})['’‘ʼ]");
private static final Pattern REPL_PATTERN_3 = compile("\u0001\u0001BR@APOS\u0001\u0001", LITERAL);
/**
* Tokenizes just like WordTokenizer with the exception that "c’h"
* is not split. "C’h" is considered as a letter in breton (trigraph)
* and it occurs in many words. So tokenizer should not split it.
* Also split things like "n’eo" into 2 tokens only "n’" + "eo".
*
* @param text Text to tokenize
* @return List of tokens.
* Note: a special string ##BR_APOS## is used to replace apostrophes
* during tokenizing.
*/
@Override
public List<String> tokenize(String text) {<FILL_FUNCTION_BODY>}
}
|
// FIXME: this is a bit of a hacky way to tokenize. It should work
// but I should work on a more elegant way.
String replaced = REPL_PATTERN_1.matcher(text).replaceAll("$1\u0001\u0001BR@APOS\u0001\u0001$2");
replaced = REPL_PATTERN_2.matcher(replaced).replaceAll("$1\u0001\u0001BR@APOS\u0001\u0001 ");
List<String> tokenList = super.tokenize(replaced);
List<String> tokens = new ArrayList<>();
// Put back apostrophes and remove spurious spaces.
Iterator<String> itr = tokenList.iterator();
while (itr.hasNext()) {
String word = REPL_PATTERN_3.matcher(itr.next()).replaceAll("’");
tokens.add(word);
if (!word.equals("’") && word.endsWith("’")) {
itr.next(); // Skip the next spurious white space.
}
}
return tokens;
| 311
| 302
| 613
|
<methods>public non-sealed void <init>() ,public static List<java.lang.String> getProtocols() ,public java.lang.String getTokenizingCharacters() ,public boolean isCurrencyExpression(java.lang.String) ,public static boolean isEMail(java.lang.String) ,public static boolean isUrl(java.lang.String) ,public List<java.lang.String> replaceEmojis(java.lang.String) ,public List<java.lang.String> restoreEmojis(List<java.lang.String>, List<java.lang.String>) ,public List<java.lang.String> splitCurrencyExpression(java.lang.String) ,public List<java.lang.String> tokenize(java.lang.String) <variables>private static final java.util.regex.Pattern CURRENCY_EXPRESSION,private static final java.util.regex.Pattern CURRENCY_SYMBOLS,private static final java.util.regex.Pattern CURRENCY_VALUE,private static final java.util.regex.Pattern DOMAIN_CHARS,private static final java.util.regex.Pattern E_MAIL,private static final java.util.regex.Pattern NO_PROTOCOL_URL,private static final List<java.lang.String> PROTOCOLS,protected final java.lang.String REMOVED_EMOJI,private static final java.lang.String TOKENIZING_CHARACTERS,private static final java.util.regex.Pattern URL_CHARS
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/ca/src/main/java/org/languagetool/language/BalearicCatalan.java
|
BalearicCatalan
|
getDefaultDisabledRulesForVariant
|
class BalearicCatalan extends Catalan {
@Override
public String getName() {
return "Catalan (Balearic)";
}
@Override
public String[] getCountries() {
return new String[]{"ES"};
}
@Override
public String getVariant() {
// unlike Valencian (ca-ES-valencia) this code is not registered by IANA language subtag registry
return "balear";
}
@Override
public List<String> getDefaultEnabledRulesForVariant() {
List<String> rules = Arrays.asList("EXIGEIX_VERBS_BALEARS");
return Collections.unmodifiableList(rules);
}
@Override
public List<String> getDefaultDisabledRulesForVariant() {<FILL_FUNCTION_BODY>}
}
|
// Important: Java rules are not disabled here
List<String> rules = Arrays.asList("EXIGEIX_VERBS_CENTRAL","CA_SIMPLE_REPLACE_BALEARIC");
return Collections.unmodifiableList(rules);
| 225
| 70
| 295
|
<methods>public non-sealed void <init>() ,public java.lang.String adaptSuggestion(java.lang.String) ,public List<org.languagetool.rules.RuleMatch> adaptSuggestions(List<org.languagetool.rules.RuleMatch>, Set<java.lang.String>) ,public org.languagetool.tagging.disambiguation.Disambiguator createDefaultDisambiguator() ,public org.languagetool.tokenizers.SentenceTokenizer createDefaultSentenceTokenizer() ,public org.languagetool.rules.spelling.SpellingCheckRule createDefaultSpellingRule(java.util.ResourceBundle) throws java.io.IOException,public org.languagetool.synthesis.Synthesizer createDefaultSynthesizer() ,public org.languagetool.tagging.Tagger createDefaultTagger() ,public org.languagetool.tokenizers.Tokenizer createDefaultWordTokenizer() ,public java.lang.String getClosingDoubleQuote() ,public java.lang.String getClosingSingleQuote() ,public java.lang.String[] getCountries() ,public org.languagetool.Language getDefaultLanguageVariant() ,public org.languagetool.LanguageMaintainedState getMaintainedState() ,public org.languagetool.language.Contributor[] getMaintainers() ,public org.languagetool.rules.spelling.multitoken.MultitokenSpeller getMultitokenSpeller() ,public java.lang.String getName() ,public java.lang.String getOpeningDoubleQuote() ,public java.lang.String getOpeningSingleQuote() ,public List<org.languagetool.rules.Rule> getRelevantRules(java.util.ResourceBundle, org.languagetool.UserConfig, org.languagetool.Language, List<org.languagetool.Language>) throws java.io.IOException,public java.lang.String getShortCode() ,public boolean hasMinMatchesRules() ,public boolean isAdvancedTypographyEnabled() ,public List<org.languagetool.rules.RuleMatch> mergeSuggestions(List<org.languagetool.rules.RuleMatch>, org.languagetool.markup.AnnotatedText, Set<java.lang.String>) ,public List<java.lang.String> prepareLineForSpeller(java.lang.String) ,public java.lang.String toAdvancedTypography(java.lang.String) <variables>private static final java.util.regex.Pattern 
CA_APOSTROPHES1,private static final java.util.regex.Pattern CA_APOSTROPHES2,private static final java.util.regex.Pattern CA_APOSTROPHES3,private static final java.util.regex.Pattern CA_APOSTROPHES4,private static final java.util.regex.Pattern CA_APOSTROPHES5,private static final java.util.regex.Pattern CA_APOSTROPHES6,private static final java.util.regex.Pattern CA_CONTRACTIONS,private static final java.util.regex.Pattern CA_OLD_DIACRITICS,private static final java.util.regex.Pattern PATTERN_1,private static final java.util.regex.Pattern PATTERN_2,private static final java.util.regex.Pattern PATTERN_3,private static final java.util.regex.Pattern POSSESSIUS_V,private static final java.util.regex.Pattern POSSESSIUS_v,private final List<java.lang.String> spellerExceptions
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/ca/src/main/java/org/languagetool/language/ValencianCatalan.java
|
ValencianCatalan
|
getDefaultEnabledRulesForVariant
|
class ValencianCatalan extends Catalan {
@Override
public String getName() {
return "Catalan (Valencian)";
}
@Override
public String[] getCountries() {
return new String[]{"ES"};
}
@Override
public String getVariant() {
return "valencia";
}
@NotNull
@Override
public Tagger createDefaultTagger() {
return CatalanTagger.INSTANCE_VAL;
}
@Override
public List<Rule> getRelevantRules(ResourceBundle messages, UserConfig userConfig, Language motherTongue, List<Language> altLanguages) throws IOException {
List<Rule> relevantRules = new ArrayList<>(super.getRelevantRules(messages, userConfig, motherTongue,altLanguages));
relevantRules.add(new WordCoherencyValencianRule(messages));
return relevantRules;
}
@Override
public List<String> getDefaultEnabledRulesForVariant() {<FILL_FUNCTION_BODY>}
@Override
public List<String> getDefaultDisabledRulesForVariant() {
// Important: Java rules are not disabled here
List<String> rules = Arrays.asList("EXIGEIX_VERBS_CENTRAL", "EXIGEIX_ACCENTUACIO_GENERAL", "EXIGEIX_POSSESSIUS_V",
"EVITA_PRONOMS_VALENCIANS", "EVITA_DEMOSTRATIUS_EIXE", "VOCABULARI_VALENCIA", "EXIGEIX_US", "FINS_EL_GENERAL",
"EVITA_INFINITIUS_INDRE", "EVITA_DEMOSTRATIUS_ESTE");
return Collections.unmodifiableList(rules);
}
}
|
List<String> rules = Arrays.asList("EXIGEIX_VERBS_VALENCIANS", "EXIGEIX_ACCENTUACIO_VALENCIANA",
"EXIGEIX_POSSESSIUS_U", "EXIGEIX_VERBS_EIX", "EXIGEIX_VERBS_ISC", "PER_PER_A_INFINITIU", "FINS_EL_AVL");
return Collections.unmodifiableList(rules);
| 491
| 127
| 618
|
<methods>public non-sealed void <init>() ,public java.lang.String adaptSuggestion(java.lang.String) ,public List<org.languagetool.rules.RuleMatch> adaptSuggestions(List<org.languagetool.rules.RuleMatch>, Set<java.lang.String>) ,public org.languagetool.tagging.disambiguation.Disambiguator createDefaultDisambiguator() ,public org.languagetool.tokenizers.SentenceTokenizer createDefaultSentenceTokenizer() ,public org.languagetool.rules.spelling.SpellingCheckRule createDefaultSpellingRule(java.util.ResourceBundle) throws java.io.IOException,public org.languagetool.synthesis.Synthesizer createDefaultSynthesizer() ,public org.languagetool.tagging.Tagger createDefaultTagger() ,public org.languagetool.tokenizers.Tokenizer createDefaultWordTokenizer() ,public java.lang.String getClosingDoubleQuote() ,public java.lang.String getClosingSingleQuote() ,public java.lang.String[] getCountries() ,public org.languagetool.Language getDefaultLanguageVariant() ,public org.languagetool.LanguageMaintainedState getMaintainedState() ,public org.languagetool.language.Contributor[] getMaintainers() ,public org.languagetool.rules.spelling.multitoken.MultitokenSpeller getMultitokenSpeller() ,public java.lang.String getName() ,public java.lang.String getOpeningDoubleQuote() ,public java.lang.String getOpeningSingleQuote() ,public List<org.languagetool.rules.Rule> getRelevantRules(java.util.ResourceBundle, org.languagetool.UserConfig, org.languagetool.Language, List<org.languagetool.Language>) throws java.io.IOException,public java.lang.String getShortCode() ,public boolean hasMinMatchesRules() ,public boolean isAdvancedTypographyEnabled() ,public List<org.languagetool.rules.RuleMatch> mergeSuggestions(List<org.languagetool.rules.RuleMatch>, org.languagetool.markup.AnnotatedText, Set<java.lang.String>) ,public List<java.lang.String> prepareLineForSpeller(java.lang.String) ,public java.lang.String toAdvancedTypography(java.lang.String) <variables>private static final java.util.regex.Pattern 
CA_APOSTROPHES1,private static final java.util.regex.Pattern CA_APOSTROPHES2,private static final java.util.regex.Pattern CA_APOSTROPHES3,private static final java.util.regex.Pattern CA_APOSTROPHES4,private static final java.util.regex.Pattern CA_APOSTROPHES5,private static final java.util.regex.Pattern CA_APOSTROPHES6,private static final java.util.regex.Pattern CA_CONTRACTIONS,private static final java.util.regex.Pattern CA_OLD_DIACRITICS,private static final java.util.regex.Pattern PATTERN_1,private static final java.util.regex.Pattern PATTERN_2,private static final java.util.regex.Pattern PATTERN_3,private static final java.util.regex.Pattern POSSESSIUS_V,private static final java.util.regex.Pattern POSSESSIUS_v,private final List<java.lang.String> spellerExceptions
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/ca/src/main/java/org/languagetool/rules/ca/AbstractSimpleReplaceLemmasRule.java
|
AbstractSimpleReplaceLemmasRule
|
match
|
class AbstractSimpleReplaceLemmasRule extends AbstractSimpleReplaceRule {
protected Map<String, List<String>> wrongLemmas = null;
private static final Locale CA_LOCALE = new Locale("CA");
private CatalanSynthesizer synth;
public AbstractSimpleReplaceLemmasRule(final ResourceBundle messages, Language language) throws IOException {
super(messages, language);
//this.setIgnoreTaggedWords();
synth = (CatalanSynthesizer) language.getSynthesizer();
}
@Override
public Map<String, List<String>> getWrongWords() {
return wrongLemmas;
}
@Override
public boolean isCaseSensitive() {
return false;
}
@Override
public Locale getLocale() {
return CA_LOCALE;
}
@Override
public final RuleMatch[] match(final AnalyzedSentence sentence) {<FILL_FUNCTION_BODY>}
}
|
List<RuleMatch> ruleMatches = new ArrayList<>();
AnalyzedTokenReadings[] tokens = sentence.getTokensWithoutWhitespace();
String originalLemma = null;
for (int i=1; i<tokens.length; i++) {
List<String> replacementLemmas = null;
String replacePOSTag = null;
boolean bRuleMatches = false;
for (AnalyzedToken at: tokens[i].getReadings()){
if (getWrongWords().containsKey(at.getLemma())) {
replacementLemmas = getWrongWords().get(at.getLemma());
replacePOSTag = at.getPOSTag();
bRuleMatches = true;
originalLemma=at.getLemma();
break;
}
}
// find suggestions
List<String> possibleReplacements = new ArrayList<>();
if (replacementLemmas != null && replacePOSTag != null) {
String[] synthesized = null;
// synthesize replacements
for (String replacementLemma : replacementLemmas) {
try {
synthesized = synth.synthesize(new AnalyzedToken(replacementLemma, replacePOSTag, replacementLemma),
replacePOSTag);
} catch (IOException e) {
throw new RuntimeException("Could not synthesize: " + replacementLemma + " with tag " + replacePOSTag, e);
} // try with another gender
if (synthesized.length == 0) {
try {
String replacePOSTag2 = replacePOSTag.replaceAll("[MFC]S",".S").replaceAll("[MFC]P",".P");
synthesized = synth.synthesize(new AnalyzedToken(replacementLemma, replacePOSTag, replacementLemma), replacePOSTag2);
} catch (IOException e) {
throw new RuntimeException("Could not synthesize: " + replacementLemma + " with tag " + replacePOSTag, e);
}
} // add the suggestion without inflection
if (synthesized.length == 0 && replacementLemma.length()>1) {
possibleReplacements.add(replacementLemma);
} else {
possibleReplacements.addAll(Arrays.asList(synthesized));
}
}
}
if (bRuleMatches) {
RuleMatch potentialRuleMatch = createRuleMatch(tokens[i], possibleReplacements, sentence, originalLemma);
ruleMatches.add(potentialRuleMatch);
}
}
return toRuleMatchArray(ruleMatches);
| 255
| 637
| 892
|
<methods>public void <init>(java.util.ResourceBundle, org.languagetool.Language) ,public java.lang.String getDescription() ,public java.lang.String getId() ,public java.util.Locale getLocale() ,public java.lang.String getMessage(java.lang.String, List<java.lang.String>) ,public java.lang.String getShort() ,public org.languagetool.synthesis.Synthesizer getSynthesizer() ,public abstract Map<java.lang.String,List<java.lang.String>> getWrongWords() ,public boolean isCaseSensitive() ,public boolean isCheckLemmas() ,public org.languagetool.rules.RuleMatch[] match(org.languagetool.AnalyzedSentence) throws java.io.IOException,public void setCheckLemmas(boolean) ,public void setIgnoreTaggedWords() ,public void useSubRuleSpecificIds() <variables>private boolean checkLemmas,protected boolean ignoreTaggedWords,private final non-sealed org.languagetool.Language language,private static final Logger logger,protected boolean subRuleSpecificIds
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/ca/src/main/java/org/languagetool/rules/ca/AnarASuggestionsFilter.java
|
AnarASuggestionsFilter
|
acceptRuleMatch
|
class AnarASuggestionsFilter extends RuleFilter {
static private CatalanSynthesizer synth = CatalanSynthesizer.INSTANCE;
@Override
public RuleMatch acceptRuleMatch(RuleMatch match, Map<String, String> arguments, int patternTokenPos,
AnalyzedTokenReadings[] patternTokens, List<Integer> tokenPositions) throws IOException {<FILL_FUNCTION_BODY>}
private String getLanguageVariantCode(RuleMatch match) {
PatternRule pr = (PatternRule) match.getRule();
return pr.getLanguage().getShortCodeWithCountryAndVariant();
}
}
|
int initPos = 0;
AnalyzedTokenReadings[] tokens = match.getSentence().getTokensWithoutWhitespace();
while (initPos < tokens.length
&& (tokens[initPos].getStartPos() < match.getFromPos() || tokens[initPos].isSentenceStart())) {
initPos++;
}
String verbPostag = tokens[initPos].readingWithTagRegex("V.IP.*").getPOSTag();
String lemma = tokens[initPos + 2].readingWithTagRegex("V.N.*").getLemma();
AnalyzedToken at = new AnalyzedToken("", "", lemma);
String newPostag = "V[MS]I[PF]" + verbPostag.substring(4, 8);
String[] synthForms = synth.synthesize(at, newPostag, true,
getLanguageVariantCode(match));
if (synthForms.length == 0) {
return null;
}
int adjustEndPos = 0;
String[] result = PronomsFeblesHelper.getTwoNextPronouns(tokens,initPos + 3);
String pronomsDarrere = result[0];
adjustEndPos += Integer.valueOf(result[1]);
List<String> replacements = new ArrayList<>();
for (String verb : synthForms) {
String suggestion = "";
if (!pronomsDarrere.isEmpty()) {
suggestion = PronomsFeblesHelper.transformDavant(pronomsDarrere, verb);
}
suggestion += verb;
suggestion = StringTools.preserveCase(suggestion, tokens[initPos].getToken());
replacements.add(suggestion);
}
if (replacements.isEmpty()) {
return null;
}
RuleMatch ruleMatch = new RuleMatch(match.getRule(), match.getSentence(), tokens[initPos].getStartPos(),
tokens[initPos + 2 + adjustEndPos].getEndPos(), match.getMessage(), match.getShortMessage());
ruleMatch.setType(match.getType());
ruleMatch.setSuggestedReplacements(replacements);
return ruleMatch;
| 156
| 538
| 694
|
<methods>public non-sealed void <init>() ,public abstract org.languagetool.rules.RuleMatch acceptRuleMatch(org.languagetool.rules.RuleMatch, Map<java.lang.String,java.lang.String>, int, org.languagetool.AnalyzedTokenReadings[], List<java.lang.Integer>) throws java.io.IOException,public boolean matches(Map<java.lang.String,java.lang.String>, org.languagetool.AnalyzedTokenReadings[], int, List<java.lang.Integer>) throws java.io.IOException<variables>
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/ca/src/main/java/org/languagetool/rules/ca/CatalanNumberSpellerFilter.java
|
CatalanNumberSpellerFilter
|
acceptRuleMatch
|
class CatalanNumberSpellerFilter extends RuleFilter {
private final Language language = new Catalan();
private final CatalanSynthesizer synth = (CatalanSynthesizer) language.getSynthesizer();
@Override
public RuleMatch acceptRuleMatch(RuleMatch match, Map<String, String> arguments, int patternTokenPos,
AnalyzedTokenReadings[] patternTokens, List<Integer> tokenPositions) {<FILL_FUNCTION_BODY>}
}
|
String strToSpell = getRequired("number_to_spell", arguments).replaceAll("\\.", "");
if (getRequired("gender", arguments).contentEquals("feminine")) {
strToSpell = "feminine " + strToSpell;
}
String spelledNumber = synth.getSpelledNumber(strToSpell);
if (!spelledNumber.isEmpty() && spelledNumber.replaceAll("-i-", " ").replaceAll("-", " ").split(" ").length < 4) {
String message = match.getMessage();
RuleMatch ruleMatch = new RuleMatch(match.getRule(), match.getSentence(), match.getFromPos(), match.getToPos(),
message, match.getShortMessage());
ruleMatch.setType(match.getType());
ruleMatch.setSuggestedReplacement(spelledNumber);
return ruleMatch;
} else {
return null;
}
| 122
| 236
| 358
|
<methods>public non-sealed void <init>() ,public abstract org.languagetool.rules.RuleMatch acceptRuleMatch(org.languagetool.rules.RuleMatch, Map<java.lang.String,java.lang.String>, int, org.languagetool.AnalyzedTokenReadings[], List<java.lang.Integer>) throws java.io.IOException,public boolean matches(Map<java.lang.String,java.lang.String>, org.languagetool.AnalyzedTokenReadings[], int, List<java.lang.Integer>) throws java.io.IOException<variables>
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/ca/src/main/java/org/languagetool/rules/ca/CatalanRepeatedWordsRule.java
|
CatalanRepeatedWordsRule
|
adjustPostag
|
class CatalanRepeatedWordsRule extends AbstractRepeatedWordsRule {
private final Supplier<List<DisambiguationPatternRule>> antiPatterns;
private static final List<List<PatternToken>> ANTI_PATTERNS = Arrays
.asList(
Arrays.asList(csRegex("[Tt]ema|TEMA"), csRegex("\\d+|[IXVC]+"))
);
@Override
public List<DisambiguationPatternRule> getAntiPatterns() {
return antiPatterns.get();
}
public CatalanRepeatedWordsRule(ResourceBundle messages) {
super(messages, new Catalan());
antiPatterns = cacheAntiPatterns(new Catalan(), ANTI_PATTERNS);
super.setTags(Arrays.asList(Tag.picky));
// super.setDefaultTempOff();
}
private static final Map<String, SynonymsData> wordsToCheck = loadWords("/ca/synonyms.txt");
@Override
protected String getMessage() {
return "Aquesta paraula apareix en una de les frases anteriors. Podeu substituir-la per un sinònim per a fer més variat el text, llevat que la repetició sigui intencionada.";
}
@Override
public String getDescription() {
return ("Sinònims per a paraules repetides.");
}
@Override
protected Map<String, SynonymsData> getWordsToCheck() {
return wordsToCheck;
}
@Override
protected String getShortMessage() {
return "Estil: paraula repetida";
}
@Override
protected Synthesizer getSynthesizer() {
return CatalanSynthesizer.INSTANCE;
}
@Override
protected String adjustPostag(String postag) {<FILL_FUNCTION_BODY>}
@Override
protected boolean isException(AnalyzedTokenReadings[] tokens, int i, boolean sentStart, boolean isCapitalized,
boolean isAllUppercase) {
if (isAllUppercase || (isCapitalized && !sentStart)) {
return true;
}
if (tokens[i].hasPosTagStartingWith("NP")) {
return true;
}
return false;
}
}
|
if (postag.contains("CN")) {
return postag.replaceFirst("CN", "..");
} else if (postag.contains("MS")) {
return postag.replaceFirst("MS", "[MC][SN]");
} else if (postag.contains("FS")) {
return postag.replaceFirst("FS", "[FC][SN]");
} else if (postag.contains("MP")) {
return postag.replaceFirst("MP", "[MC][PN]");
} else if (postag.contains("FP")) {
return postag.replaceFirst("FP", "[FC][PN]");
} else if (postag.contains("CS")) {
return postag.replaceFirst("CS", "[MFC][SN]"); // also F ?
} else if (postag.contains("CP")) {
return postag.replaceFirst("CP", "[MFC][PN]"); // also F ?
} else if (postag.contains("MN")) {
return postag.replaceFirst("MN", "[MC][SPN]");
} else if (postag.contains("FN")) {
return postag.replaceFirst("FN", "[FC][SPN]");
}
return postag;
| 590
| 300
| 890
|
<methods>public void <init>(java.util.ResourceBundle, org.languagetool.Language) ,public abstract java.lang.String getDescription() ,public java.lang.String getId() ,public org.languagetool.rules.RuleMatch[] match(List<org.languagetool.AnalyzedSentence>) throws java.io.IOException,public int minToCheckParagraph() <variables>private static final java.lang.String FILE_ENCODING,private static final java.util.regex.Pattern HASH_PATTERN,private static final java.util.regex.Pattern PUNCT_PATTERN,private final non-sealed org.languagetool.Language language,private final non-sealed java.lang.String ruleId
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/ca/src/main/java/org/languagetool/rules/ca/CatalanSuppressMisspelledSuggestionsFilter.java
|
CatalanSuppressMisspelledSuggestionsFilter
|
isMisspelled
|
class CatalanSuppressMisspelledSuggestionsFilter extends AbstractSuppressMisspelledSuggestionsFilter {
public CatalanSuppressMisspelledSuggestionsFilter() throws IOException {
}
@Override
public boolean isMisspelled(String s, Language language) throws IOException {<FILL_FUNCTION_BODY>}
}
|
SpellingCheckRule spellerRule = language.getDefaultSpellingRule();
if (spellerRule == null) {
return false;
}
List<AnalyzedSentence> sentences = language.createDefaultJLanguageTool().analyzeText(s);
return spellerRule.match(sentences.get(0)).length > 0;
| 89
| 85
| 174
|
<methods>public org.languagetool.rules.RuleMatch acceptRuleMatch(org.languagetool.rules.RuleMatch, Map<java.lang.String,java.lang.String>, int, org.languagetool.AnalyzedTokenReadings[], List<java.lang.Integer>) throws java.io.IOException,public boolean isMisspelled(java.lang.String, org.languagetool.Language) throws java.io.IOException<variables>
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/ca/src/main/java/org/languagetool/rules/ca/CatalanUnpairedBracketsRule.java
|
CatalanUnpairedBracketsRule
|
getSuggestions
|
class CatalanUnpairedBracketsRule extends GenericUnpairedBracketsRule {
private static final List<String> CA_START_SYMBOLS = Arrays.asList("[", "(", "{", "“", "«", "\"", "'", "‘");
private static final List<String> CA_END_SYMBOLS = Arrays.asList("]", ")", "}", "”", "»", "\"", "'", "’");
//private static final Pattern NUMBER = Pattern.compile("[\\d,.]*\\d");
private static final Pattern VALID_BEFORE_CLOSING_PARENTHESIS = Pattern
.compile("\\d+|[a-zA-Z]", Pattern.UNICODE_CASE);
private static final Pattern NUMBER = Pattern.compile("\\d[\\d., ]+\\d|\\d{1,2}", Pattern.UNICODE_CASE);
public CatalanUnpairedBracketsRule(ResourceBundle messages, Language language) {
super(messages, CA_START_SYMBOLS, CA_END_SYMBOLS);
}
// @Override
// public String getId() {
// return "CA_UNPAIRED_BRACKETS";
// }
@Override
protected boolean isNoException(final String tokenStr,
final AnalyzedTokenReadings[] tokens, final int i, final int j,
final boolean precSpace, final boolean follSpace, UnsyncStack<SymbolLocator> symbolStack) {
if (i < 1) {
return true;
}
if ((tokenStr.equals("’") || tokenStr.equals("'"))
&& (tokens[i].hasPosTagStartingWith("N") || tokens[i].hasPosTagStartingWith("A"))) {
return false;
}
final boolean superException = !super.isNoException(tokenStr, tokens, i, j, precSpace, follSpace, symbolStack);
if (superException) {
return false;
}
//degrees, minutes, seconds...
if (("\"".equals(tokenStr) || "'".equals(tokenStr))
&& NUMBER.matcher(tokens[i - 1].getToken()).matches()
&& !tokens[i].isWhitespaceBefore()
&& ((i > 2 && (tokens[i - 2].getToken().contains("º") || tokens[i - 2].getToken().contains("°")))
|| (i > 4 && (tokens[i - 4].getToken().contains("º") || tokens[i - 4].getToken().contains("°"))))) {
return false;
}
if (i == 1 && tokenStr.equals("»"))
return false;
if (i > 1 && tokenStr.equals(")")) {
boolean isThereOpeningParenthesis = false;
int k=1;
while (i-k>0) {
if (tokens[i-k].getToken().equals(")"))
break;
if (tokens[i-k].getToken().equals("(")) {
isThereOpeningParenthesis=true;
break;
}
k++;
}
if (!isThereOpeningParenthesis) {
final Matcher mValidBeforeClosingParenthesis = VALID_BEFORE_CLOSING_PARENTHESIS
.matcher(tokens[i - 1].getToken());
if (mValidBeforeClosingParenthesis.matches())
return false;
}
}
return true;
}
protected List<String> getSuggestions(Supplier<String> text, int startPos, int endPos, Symbol symbol, String otherSymbol) {<FILL_FUNCTION_BODY>}
}
|
List<String> replacements = new ArrayList<>();
// add the other symbol together with the original symbol, needs to be moved by the user
if (symbol.symbolType == Symbol.Type.Closing) {
replacements.add(otherSymbol + symbol);
} else {
replacements.add(symbol + otherSymbol);
}
// add the option to remove the original symbol
replacements.add("");
return replacements;
| 958
| 115
| 1,073
|
<methods>public void <init>(java.lang.String, java.util.ResourceBundle, List<java.lang.String>, List<java.lang.String>) ,public void <init>(java.lang.String, java.util.ResourceBundle, List<java.lang.String>, List<java.lang.String>, java.util.regex.Pattern) ,public void <init>(java.util.ResourceBundle, List<java.lang.String>, List<java.lang.String>) ,public void <init>(java.util.ResourceBundle, List<java.lang.String>, List<java.lang.String>, java.util.regex.Pattern) ,public void <init>(java.util.ResourceBundle) ,public java.lang.String getDescription() ,public java.lang.String getId() ,public final org.languagetool.rules.RuleMatch[] match(List<org.languagetool.AnalyzedSentence>) ,public int minToCheckParagraph() <variables>private static final java.util.regex.Pattern NUMERALS_EN,private static final java.util.regex.Pattern PUNCTUATION,private static final java.util.regex.Pattern PUNCTUATION_NO_DOT,private final non-sealed List<java.lang.String> endSymbols,private final non-sealed java.util.regex.Pattern numerals,private final non-sealed java.lang.String ruleId,private final non-sealed List<java.lang.String> startSymbols,private final non-sealed Map<java.lang.String,java.lang.Boolean> uniqueMap
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/ca/src/main/java/org/languagetool/rules/ca/CatalanUnpairedQuestionMarksRule.java
|
CatalanUnpairedQuestionMarksRule
|
match
|
class CatalanUnpairedQuestionMarksRule extends TextLevelRule {
public CatalanUnpairedQuestionMarksRule(ResourceBundle messages, Language language) {
super();
setLocQualityIssueType(ITSIssueType.Style);
setDefaultOff();
}
protected String getStartSymbol() {
return "¿";
}
protected String getEndSymbol() {
return "?";
}
@Override
public int minToCheckParagraph() {
return 1;
}
@Override
public String getId() {
return "CA_UNPAIRED_QUESTION";
}
@Override
public String getDescription() {
return "Exigeix signe d'interrogació inicial";
}
@Override
public RuleMatch[] match(List<AnalyzedSentence> sentences) {<FILL_FUNCTION_BODY>}
private int hasTokenAtPos(String ch, AnalyzedTokenReadings[] tokens) {
int i = tokens.length - 1;
while (i > 0) {
if (tokens[i].getToken().equals(ch)) {
return i;
}
i--;
}
return -1;
}
}
|
List<RuleMatch> matches = new ArrayList<>();
int pos = 0;
for (AnalyzedSentence sentence : sentences) {
AnalyzedTokenReadings[] tokens = sentence.getTokensWithoutWhitespace();
int needsInvQuestionMarkAt = hasTokenAtPos(getEndSymbol(), tokens);
if (needsInvQuestionMarkAt > 1) {
boolean hasInvQuestionMark = false;
// boolean hasInvExlcMark = false;
AnalyzedTokenReadings firstToken = null;
for (int i = 0; i < tokens.length; i++) {
if (firstToken == null && !tokens[i].isSentenceStart()
&& !StringTools.isPunctuationMark(tokens[i].getToken())) {
firstToken = tokens[i];
}
if (tokens[i].getToken().equals(getStartSymbol()) && i < needsInvQuestionMarkAt) {
hasInvQuestionMark = true;
}
// possibly a sentence end
if (!tokens[i].isSentenceEnd() && tokens[i].getToken().equals(getEndSymbol())
&& i < needsInvQuestionMarkAt) {
firstToken = null;
}
// put the question mark in: ¿de què... ¿de quina
// put the question mark in: ¿de qué... ¿para cuál... ¿cómo...
if (i > 2 && i + 2 < tokens.length) {
if (tokens[i - 1].getToken().equals(",") && tokens[i].hasPosTag("CC") && tokens[i + 1].hasPosTag("SPS00")
&& (tokens[i + 2].hasPosTagStartingWith("PT") || tokens[i + 2].hasPosTagStartingWith("DT"))) {
firstToken = tokens[i];
}
if (tokens[i - 1].getToken().equals(",") && tokens[i].hasPosTag("SPS00")
&& (tokens[i + 1].hasPosTagStartingWith("PT") || tokens[i + 1].hasPosTagStartingWith("DT"))) {
firstToken = tokens[i];
}
if (tokens[i - 1].getToken().equals(",") && tokens[i].hasPosTag("CC")
&& (tokens[i + 1].hasPosTagStartingWith("PT") || tokens[i + 1].hasPosTagStartingWith("DT"))) {
firstToken = tokens[i];
}
if (tokens[i - 1].getToken().equals(",")
&& (tokens[i].hasPosTagStartingWith("PT") || tokens[i].hasPosTagStartingWith("DT"))) {
firstToken = tokens[i];
}
if (tokens[i - 1].getToken().equals(",") && tokens[i].hasPosTag("CC")
&& (tokens[i + 1].getToken().equals("no") || tokens[i + 1].getToken().equals("sí"))) {
firstToken = tokens[i];
}
}
if (i > 2 && i < tokens.length) {
if (tokens[i - 1].getToken().equals(",")
&& (tokens[i].getToken().equals("no") || tokens[i].getToken().equals("sí")
|| tokens[i].getToken().equals("oi") || tokens[i].getToken().equals("eh"))) {
firstToken = tokens[i];
}
}
}
if (firstToken != null) {
String s = null;
if (needsInvQuestionMarkAt > 1 && !hasInvQuestionMark) {
s = getStartSymbol();
}
if (s != null) { // && !prevSentEndsWithColon: skip sentences with ':' due to unclear sentence
// boundaries
String message = "Símbol sense parella: Sembla que falta un '" + s + "'";
RuleMatch match = new RuleMatch(this, sentence, pos + firstToken.getStartPos(),
pos + firstToken.getEndPos(), message);
match.setSuggestedReplacement(s + firstToken.getToken());
matches.add(match);
}
}
}
pos += sentence.getCorrectedTextLength();
// prevSentEndsWithColon = endsWithColon;
}
return toRuleMatchArray(matches);
| 316
| 1,087
| 1,403
|
<methods>public void <init>() ,public void <init>(java.util.ResourceBundle) ,public int estimateContextForSureMatch() ,public org.languagetool.rules.RuleMatch[] match(List<org.languagetool.AnalyzedSentence>, org.languagetool.markup.AnnotatedText) throws java.io.IOException,public abstract org.languagetool.rules.RuleMatch[] match(List<org.languagetool.AnalyzedSentence>) throws java.io.IOException,public final org.languagetool.rules.RuleMatch[] match(org.languagetool.AnalyzedSentence) throws java.io.IOException,public abstract int minToCheckParagraph() <variables>
|
languagetool-org_languagetool
|
languagetool/languagetool-language-modules/ca/src/main/java/org/languagetool/rules/ca/CatalanWordRepeatBeginningRule.java
|
CatalanWordRepeatBeginningRule
|
getSuggestions
|
class CatalanWordRepeatBeginningRule extends WordRepeatBeginningRule {
public CatalanWordRepeatBeginningRule(ResourceBundle messages, Language language) {
super(messages, language);
super.setTags(Collections.singletonList(Tag.picky));
//super.setDefaultTempOff();
addExamplePair(Example.wrong("Però el carrer és tot modernista. <marker>Però</marker> té nom de poeta."),
Example.fixed("Però el carrer és tot modernista. Així i tot, té nom de poeta."));
}
@Override
public String getId() {
return "CATALAN_WORD_REPEAT_BEGINNING_RULE";
}
// ==================== ADVERBS ======================
// adverbs used to add to what the previous sentence mentioned
private static final Set<String> ADD_ADVERBS = new HashSet<>();
// adverbs used to express contrast to what the previous sentence mentioned
private static final Set<String> CONTRAST_CONJ = new HashSet<>();
private static final Set<String> CAUSE_CONJ = new HashSet<>();
// adverbs used to express emphasis to what the previous sentence mentioned
private static final Set<String> EMPHASIS_ADVERBS = new HashSet<>();
// adverbs used to explain what the previous sentence mentioned
private static final Set<String> EXPLAIN_ADVERBS = new HashSet<>();
// personal pronouns
private static final Set<String> PERSONAL_PRONOUNS = new HashSet<>();
// ==================== EXPRESSIONS ======================
// the expressions will be used only as additional suggestions
// linking expressions that can be used instead of the ADD_ADVERBS
private static final List<String> ADD_EXPRESSIONS = Arrays.asList("Així mateix", "A més a més");
// linking expressions that can be used instead of the CONTRAST_ADVERBS
private static final List<String> CONTRAST_EXPRESSIONS = Arrays.asList("Així i tot", "D'altra banda",
"Per altra part");
private static final List<String> CAUSE_EXPRESSIONS = Arrays.asList("Ja que", "Per tal com", "Pel fet que",
"Puix que");
private static final List<String> EXCEPCIONS_START = Arrays.asList("l'", "el", "la", "els", "les", "punt", "article",
"mòdul", "part", "sessió", "unitat", "tema", "a", "per", "en", "com");
static {
// based on https://www.pinterest.com/pin/229542912245527548/
ADD_ADVERBS.add("Igualment");
ADD_ADVERBS.add("També");
ADD_ADVERBS.add("Addicionalment");
CONTRAST_CONJ.add("Però");
CONTRAST_CONJ.add("Emperò");
CONTRAST_CONJ.add("Mes");
CAUSE_CONJ.add("Perquè");
CAUSE_CONJ.add("Car");
EMPHASIS_ADVERBS.add("Òbviament");
EMPHASIS_ADVERBS.add("Clarament");
EMPHASIS_ADVERBS.add("Absolutament");
EMPHASIS_ADVERBS.add("Definitivament");
EXPLAIN_ADVERBS.add("Específicament");
EXPLAIN_ADVERBS.add("Concretament");
EXPLAIN_ADVERBS.add("Particularment");
EXPLAIN_ADVERBS.add("Precisament");
PERSONAL_PRONOUNS.add("jo");
PERSONAL_PRONOUNS.add("tu");
PERSONAL_PRONOUNS.add("ell");
PERSONAL_PRONOUNS.add("ella");
PERSONAL_PRONOUNS.add("nosaltres");
PERSONAL_PRONOUNS.add("vosaltres");
PERSONAL_PRONOUNS.add("ells");
PERSONAL_PRONOUNS.add("elles");
PERSONAL_PRONOUNS.add("vostè");
PERSONAL_PRONOUNS.add("vostès");
PERSONAL_PRONOUNS.add("vosté");
PERSONAL_PRONOUNS.add("vostés");
PERSONAL_PRONOUNS.add("vós");
}
@Override
public boolean isException(String token) {
return super.isException(token) || Character.isDigit(token.charAt(0))
|| EXCEPCIONS_START.contains(token.toLowerCase());
}
@Override
protected boolean isAdverb(AnalyzedTokenReadings token) {
if (token.hasPosTag("RG") || token.hasPosTag("LOC_ADV")) {
return true;
}
String tok = token.getToken();
return ADD_ADVERBS.contains(tok) || CONTRAST_CONJ.contains(tok) || EMPHASIS_ADVERBS.contains(tok)
|| EXPLAIN_ADVERBS.contains(tok) || CAUSE_CONJ.contains(tok);
}
@Override
protected List<String> getSuggestions(AnalyzedTokenReadings token) {<FILL_FUNCTION_BODY>}
/**
* Gives suggestions to replace the given adverb.
*
* @param adverb to get suggestions for
* @param adverbsOfCategory the adverbs of the same category as adverb (adverb
* is <b>required</b> to be contained in the Set)
* @return a List of suggested adverbs to replace the given adverb
*/
private List<String> getDifferentAdverbsOfSameCategory(String adverb, Set<String> adverbsOfCategory) {
return adverbsOfCategory.stream().filter(adv -> !adv.equals(adverb)).collect(Collectors.toList());
}
}
|
String tok = token.getToken();
String lowerTok = tok.toLowerCase();
// the repeated word is a personal pronoun
if (PERSONAL_PRONOUNS.contains(lowerTok)) {
return Arrays.asList("A més a més, " + lowerTok, "Igualment, " + lowerTok, "No sols aixó, sinó que " + lowerTok);
} else if (ADD_ADVERBS.contains(tok)) {
List<String> addSuggestions = getDifferentAdverbsOfSameCategory(tok, ADD_ADVERBS);
addSuggestions.addAll(ADD_EXPRESSIONS);
return addSuggestions;
} else if (CONTRAST_CONJ.contains(tok)) {
List<String> contrastSuggestions = new ArrayList<>(); // getDifferentAdverbsOfSameCategory(tok, CONTRAST_CONJ);
contrastSuggestions.addAll(CONTRAST_EXPRESSIONS);
return contrastSuggestions;
} else if (EMPHASIS_ADVERBS.contains(tok)) {
return getDifferentAdverbsOfSameCategory(tok, EMPHASIS_ADVERBS);
} else if (EXPLAIN_ADVERBS.contains(tok)) {
return getDifferentAdverbsOfSameCategory(tok, EXPLAIN_ADVERBS);
} else if (CAUSE_CONJ.contains(tok)) {
List<String> causeSuggestions = getDifferentAdverbsOfSameCategory(tok, CAUSE_CONJ);
causeSuggestions.addAll(CAUSE_EXPRESSIONS);
return causeSuggestions;
}
return Collections.emptyList();
| 1,573
| 423
| 1,996
|
<methods>public void <init>(java.util.ResourceBundle, org.languagetool.Language) ,public java.lang.String getDescription() ,public java.lang.String getId() ,public boolean isException(java.lang.String) ,public boolean isSentenceException(org.languagetool.AnalyzedSentence) ,public org.languagetool.rules.RuleMatch[] match(List<org.languagetool.AnalyzedSentence>) throws java.io.IOException,public int minToCheckParagraph() <variables>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.