1 package com.hughes.android.dictionary.parser;
3 import java.io.BufferedInputStream;
4 import java.io.DataInputStream;
5 import java.io.EOFException;
7 import java.io.FileInputStream;
8 import java.io.IOException;
9 import java.util.ArrayList;
10 import java.util.Arrays;
11 import java.util.Collection;
12 import java.util.Collections;
13 import java.util.LinkedHashSet;
14 import java.util.List;
17 import java.util.regex.Pattern;
19 import com.hughes.android.dictionary.engine.EntryTypeName;
20 import com.hughes.android.dictionary.engine.IndexBuilder;
21 import com.hughes.android.dictionary.engine.IndexedEntry;
22 import com.hughes.android.dictionary.engine.PairEntry;
23 import com.hughes.android.dictionary.engine.PairEntry.Pair;
/**
 * Parses English Wiktionary page sections (title + heading + wikitext, read from
 * a pre-digested binary dump) and emits translation/definition pairs into two
 * IndexBuilder sinks: one for the English side and one for the other language.
 * NOTE(review): the class body continues past this excerpt; several field and
 * helper declarations are not visible here.
 */
25 public class EnWiktionaryXmlParser {
27 // TODO: look for {{ and [[ and <adf> <!-- in output.
28 // TODO: process {{ttbc}} lines
30 static final Pattern partOfSpeechHeader = Pattern.compile(
31 "Noun|Verb|Adjective|Adverb|Pronoun|Conjunction|Interjection|" +
32 "Preposition|Proper noun|Article|Prepositional phrase|Acronym|" +
33 "Abbreviation|Initialism|Contraction|Prefix|Suffix|Symbol|Letter|" +
34 "Ligature|Idiom|Phrase|" +
35 // These are @deprecated:
36 "Noun form|Verb form|Adjective form|Nominal phrase|Noun phrase|" +
37 "Verb phrase|Transitive verb|Intransitive verb|Reflexive verb|" +
38 // These are extras I found:
39 "Determiner|Numeral|Number|Cardinal number|Ordinal number|Proverb|" +
40 "Particle|Interjection|Pronominal adverb" +
41 "Han character|Hanzi|Hanja|Kanji|Katakana character|Syllable");
// Target indexes and language filters; assigned once in the constructor.
43 final IndexBuilder enIndexBuilder; // sink for English-side index entries
44 final IndexBuilder otherIndexBuilder; // sink for foreign-language-side index entries
45 final Pattern langPattern; // matches the target language's name in section headings / "*Lang:" lines
46 final Pattern langCodePattern; // matches the target language's code in {{t}}-style templates
/**
 * @param enIndexBuilder    sink for English-side entries
 * @param otherIndexBuilder sink for foreign-language-side entries
 * @param langPattern       matches the foreign language's name (headings, "*Lang:" lines)
 * @param langCodePattern   matches the foreign language's code in translation templates
 * @param swap              presumably controls pair orientation in the output dictionary;
 *                          NOTE(review): its field assignment is on a line elided from this
 *                          excerpt — confirm it is stored.
 */
49 public EnWiktionaryXmlParser(final IndexBuilder enIndexBuilder, final IndexBuilder otherIndexBuilder, final Pattern langPattern, final Pattern langCodePattern, final boolean swap) {
50 this.enIndexBuilder = enIndexBuilder;
51 this.otherIndexBuilder = otherIndexBuilder;
52 this.langPattern = langPattern;
53 this.langCodePattern = langCodePattern;
/**
 * Reads the pre-digested binary dump (repeated records of: UTF title, UTF heading,
 * int byte-length, UTF-8 section bytes) and dispatches each record to parseSection().
 * Stops after {@code pageLimit} pages when pageLimit >= 0.
 * NOTE(review): several lines (loop structure, readFully of the byte array, stream
 * close/finally) are elided from this excerpt; the stream should be closed in a
 * finally block or try-with-resources — confirm in the full file.
 *
 * @throws IOException on read failure
 */
58 public void parse(final File file, final int pageLimit) throws IOException {
60 final DataInputStream dis = new DataInputStream(new BufferedInputStream(new FileInputStream(file)));
62 if (pageLimit >= 0 && pageCount >= pageLimit) {
68 title = dis.readUTF();
// EOF is the normal end-of-input signal for this record format.
69 } catch (EOFException e) {
73 final String heading = dis.readUTF();
74 final int bytesLength = dis.readInt();
75 final byte[] bytes = new byte[bytesLength];
// "UTF8" is a legal charset alias; StandardCharsets.UTF_8 would avoid the
// checked UnsupportedEncodingException path — left unchanged here.
77 final String text = new String(bytes, "UTF8");
79 parseSection(title, heading, text);
// Progress logging every 1000 pages.
82 if (pageCount % 1000 == 0) {
83 System.out.println("pageCount=" + pageCount);
/**
 * Routes one page section: skips non-article namespaces, then dispatches by the
 * section heading's language — "English" sections go to doEnglishWord(), sections
 * whose heading matches langPattern go to doForeignWord(); all others are ignored.
 */
88 private void parseSection(final String title, String heading, final String text) {
// Skip non-dictionary namespaces entirely.
89 if (title.startsWith("Wiktionary:") ||
90 title.startsWith("Template:") ||
91 title.startsWith("Appendix:") ||
92 title.startsWith("Category:") ||
93 title.startsWith("Index:") ||
94 title.startsWith("MediaWiki:") ||
95 title.startsWith("TransWiki:") ||
96 title.startsWith("Citations:") ||
97 title.startsWith("Concordance:") ||
98 title.startsWith("Help:")) {
// Headings arrive as "==Lang=="; strip the '=' markers before comparing.
102 heading = heading.replaceAll("=", "").trim();
103 if (heading.equals("English")) {
104 doEnglishWord(title, text);
105 } else if (langPattern.matcher(heading).matches()){
106 doForeignWord(title, text);
111 // -------------------------------------------------------------------------
/**
 * Walks an English-language section, tracking the current part-of-speech heading
 * and dispatching "Translations" sub-sections to doTranslations().
 * NOTE(review): declarations of {@code pos}/{@code posDepth} and the loop's other
 * branches are on lines elided from this excerpt.
 */
116 private void doEnglishWord(String title, String text) {
117 final WikiTokenizer wikiTokenizer = new WikiTokenizer(text);
118 while (wikiTokenizer.nextToken() != null) {
120 if (wikiTokenizer.isHeading()) {
121 final String headerName = wikiTokenizer.headingWikiText();
// A heading at or above the current POS depth ends the current POS scope.
123 if (wikiTokenizer.headingDepth() <= posDepth) {
128 if (partOfSpeechHeader.matcher(headerName).matches()) {
129 posDepth = wikiTokenizer.headingDepth();
130 pos = wikiTokenizer.headingWikiText();
131 } else if (headerName.equals("Translations")) {
132 doTranslations(title, wikiTokenizer);
133 } else if (headerName.equals("Pronunciation")) {
134 //doPronunciation(wikiLineReader);
141 private static Set<String> encodings = new LinkedHashSet<String>(Arrays.asList("zh-ts",
142 "sd-Arab", "ku-Arab", "Arab", "unicode", "Laoo", "ur-Arab", "Thai",
143 "fa-Arab", "Khmr", "zh-tsp", "Cyrl", "IPAchar", "ug-Arab", "ko-inline",
144 "Jpan", "Kore", "Hebr", "rfscript", "Beng", "Mong", "Knda", "Cyrs",
145 "yue-tsj", "Mlym", "Tfng", "Grek", "yue-yue-j"));
/**
 * Walks a "Translations" sub-section line by line. {{trans-top}} opens a new sense,
 * "* Lang: ..." list items whose language matches langPattern are forwarded to
 * doTranslationLine(); everything unexpected is logged to stderr.
 * NOTE(review): the declaration of {@code sense}, several break/continue statements,
 * and closing braces are on lines elided from this excerpt.
 */
147 private void doTranslations(final String title, final WikiTokenizer wikiTokenizer) {
149 boolean done = false;
150 while (wikiTokenizer.nextToken() != null) {
// A new heading means the Translations sub-section is over; rewind so the
// caller re-reads it.
151 if (wikiTokenizer.isHeading()) {
152 wikiTokenizer.returnToLineStart();
159 // Check whether we care about this line:
161 //line = WikiLineReader.removeSquareBrackets(line);
163 if (wikiTokenizer.isFunction()) {
164 final String functionName = wikiTokenizer.functionName();
165 final List<String> positionArgs = wikiTokenizer.functionPositionArgs();
// {{trans-top|sense}} starts a translation table; its first arg is the gloss.
167 if (functionName.equals("trans-top")) {
169 if (wikiTokenizer.functionPositionArgs().size() >= 1) {
170 sense = positionArgs.get(0);
171 // TODO: could emphasize words in [[brackets]] inside sense.
172 sense = WikiTokenizer.toPlainText(sense);
173 //System.out.println("Sense: " + sense);
// Table structure templates with no content of their own are skipped.
175 } else if (functionName.equals("trans-bottom")) {
177 } else if (functionName.equals("trans-mid")) {
178 } else if (functionName.equals("trans-see")) {
179 } else if (functionName.startsWith("checktrans")) {
180 //TODO: Check this: done = true;
182 System.err.println("Unexpected translation wikifunction: " + wikiTokenizer.token() + ", title=" + title);
// "* Language: translations..." rows — the only lines that produce output.
184 } else if (wikiTokenizer.isListItem() && wikiTokenizer.listItemPrefix().startsWith("*")) {
185 final String line = wikiTokenizer.listItemWikiText();
186 // This line could produce an output...
188 // First strip the language and check whether it matches.
189 // And hold onto it for sub-lines.
190 final int colonIndex = line.indexOf(":");
191 if (colonIndex == -1) {
195 final String lang = line.substring(0, colonIndex);
// Only process rows for the language this parser instance targets.
196 if (!this.langPattern.matcher(lang).find()) {
200 String rest = line.substring(colonIndex + 1).trim();
201 doTranslationLine(line, title, sense, rest);
203 } else if (wikiTokenizer.remainderStartsWith("''See''")) {
204 wikiTokenizer.nextLine();
205 System.out.println("Skipping line: " + wikiTokenizer.token());
206 } else if (wikiTokenizer.isWikiLink()) {
207 final String wikiLink = wikiTokenizer.wikiLinkText();
// Self-referencing namespaced links and category links are silently ignored.
208 if (wikiLink.contains(":") && wikiLink.contains(title)) {
209 } else if (wikiLink.contains("Category:")) {
211 System.err.println("Unexpected wikiLink: " + wikiTokenizer.token() + ", title=" + title);
213 } else if (wikiTokenizer.isNewline() || wikiTokenizer.isMarkup() || wikiTokenizer.isComment()) {
215 final String token = wikiTokenizer.token();
// "----" is the horizontal rule separating language sections.
216 if (token.equals("----")) {
218 System.err.println("Unexpected translation token: " + wikiTokenizer.token() + ", title=" + title);
/**
 * Bounds-safe positional-argument accessor: returns the element at {@code index},
 * or null when the list is too short (template had fewer args than expected).
 */
225 private static <T> T get(final List<T> list, final int index) {
226 return index < list.size() ? list.get(index) : null;
/**
 * Converts one "* Language: ..." translation row into a PairEntry pair:
 * the foreign-side text is accumulated in {@code otherText} (expanding {{t}},
 * gender, qualifier, script-wrapper and similar templates), the English side is
 * built from title + sense + part of speech, and both sides are registered with
 * the appropriate IndexBuilder.
 * NOTE(review): many closing braces, {@code continue} statements and the
 * references to {@code pos}/{@code swap} live on lines elided from this excerpt.
 */
229 private void doTranslationLine(final String line, final String title, final String sense, final String rest) {
230 // Good chance we'll actually file this one...
231 final PairEntry pairEntry = new PairEntry();
232 final IndexedEntry indexedEntry = new IndexedEntry(pairEntry);
234 final StringBuilder otherText = new StringBuilder();
235 final WikiTokenizer wikiTokenizer = new WikiTokenizer(rest);
236 while (wikiTokenizer.nextToken() != null) {
238 if (wikiTokenizer.isPlainText()) {
239 final String plainText = wikiTokenizer.token();
// NOTE(review): append("") is a no-op — possibly a leftover separator slot.
240 otherText.append("").append(plainText);
241 otherIndexBuilder.addEntryWithString(indexedEntry, plainText, EntryTypeName.WIKTIONARY_TRANSLATION_OTHER_TEXT);
243 } else if (wikiTokenizer.isWikiLink()) {
244 final String plainText = wikiTokenizer.wikiLinkText();
245 otherText.append("").append(plainText);
246 otherIndexBuilder.addEntryWithString(indexedEntry, plainText, EntryTypeName.WIKTIONARY_TRANSLATION_WIKI_TEXT);
248 } else if (wikiTokenizer.isFunction()) {
249 final String functionName = wikiTokenizer.functionName();
250 final List<String> args = wikiTokenizer.functionPositionArgs();
251 final Map<String,String> namedArgs = wikiTokenizer.functionNamedArgs();
// {{t|lang|word|gender|tr=...}} and its +/-/ø variants carry the actual translation.
253 if (functionName.equals("t") || functionName.equals("t+") || functionName.equals("t-") || functionName.equals("tø")) {
254 if (args.size() < 2) {
255 System.err.println("{{t}} with too few args: " + line + ", title=" + title);
258 final String langCode = get(args, 0);
// Only file translations whose language code matches this parser's target.
259 if (this.langCodePattern.matcher(langCode).matches()) {
260 final String word = get(args, 1);
261 final String gender = get(args, 2);
262 final String transliteration = namedArgs.get("tr");
263 if (otherText.length() > 0) {
264 otherText.append("");
266 otherText.append(word);
267 otherIndexBuilder.addEntryWithString(indexedEntry, word, EntryTypeName.WIKTIONARY_TITLE_SINGLE, EntryTypeName.WIKTIONARY_TITLE_MULTI);
268 if (gender != null) {
269 otherText.append(String.format(" {%s}", gender));
271 if (transliteration != null) {
272 otherText.append(String.format(" (tr. %s)", transliteration));
273 otherIndexBuilder.addEntryWithString(indexedEntry, transliteration, EntryTypeName.WIKTIONARY_TRANSLITERATION);
// {{qualifier|...}} renders as a parenthesized annotation.
276 } else if (functionName.equals("qualifier")) {
277 String qualifier = args.get(0);
278 if (!namedArgs.isEmpty() || args.size() > 1) {
279 System.err.println("weird qualifier: " + line);
281 otherText.append("(").append(qualifier).append(")");
// Script-wrapper templates: first arg is the text itself.
282 } else if (encodings.contains(functionName)) {
283 otherText.append("").append(args.get(0));
284 otherIndexBuilder.addEntryWithString(indexedEntry, args.get(0), EntryTypeName.WIKTIONARY_TRANSLATION_OTHER_TEXT);
// Bare gender/number templates render as {m|...}, {f}, {n}, {p}.
285 } else if (functionName.equals("m") || functionName.equals("f") || functionName.equals("n") || functionName.equals("p")) {
286 otherText.append("{");
287 otherText.append(functionName);
288 for (int i = 0; i < args.size(); ++i) {
289 otherText.append("|").append(args.get(i));
291 otherText.append("}");
292 } else if (functionName.equals("g")) {
293 otherText.append("{g}");
294 } else if (functionName.equals("l")) {
295 // encodes text in various langs.
// {{l|lang|word}} — second positional arg is the linked word.
297 otherText.append("").append(args.get(1));
298 otherIndexBuilder.addEntryWithString(indexedEntry, args.get(1), EntryTypeName.WIKTIONARY_TRANSLATION_OTHER_TEXT);
299 // TODO: transliteration
300 } else if (functionName.equals("term")) {
301 // cross-reference to another dictionary
302 otherText.append("").append(args.get(0));
303 otherIndexBuilder.addEntryWithString(indexedEntry, args.get(0), EntryTypeName.WIKTIONARY_TRANSLATION_OTHER_TEXT);
304 // TODO: transliteration
305 } else if (functionName.equals("italbrac") || functionName.equals("gloss")) {
306 // TODO: put this text aside to use it.
307 otherText.append("[").append(args.get(0)).append("]");
308 otherIndexBuilder.addEntryWithString(indexedEntry, args.get(0), EntryTypeName.WIKTIONARY_TRANSLATION_OTHER_TEXT);
// Maintenance templates with no user-visible output are skipped.
309 } else if (functionName.equals("ttbc")) {
310 } else if (functionName.equals("trreq")) {
311 } else if (functionName.equals("not used")) {
312 otherText.append("(not used)");
313 } else if (functionName.equals("t-image")) {
314 // American sign language
// Unknown argument-less templates are emitted as a visible placeholder.
315 } else if (args.isEmpty() && namedArgs.isEmpty()) {
316 otherText.append("{UNK. FUNC.: ").append(functionName).append("}");
318 System.err.println("Unexpected t+- wikifunction: " + line + ", title=" + title);
321 } else if (wikiTokenizer.isNewline()) {
323 } else if (wikiTokenizer.isComment()) {
324 } else if (wikiTokenizer.isMarkup()) {
326 System.err.println("Bad translation token: " + wikiTokenizer.token());
// Assemble the English side: "title (sense) (pos)".
331 StringBuilder englishText = new StringBuilder();
333 englishText.append(title);
335 englishText.append(" (").append(sense).append(")");
336 enIndexBuilder.addEntryWithString(indexedEntry, sense, EntryTypeName.WIKTIONARY_TRANSLATION_SENSE, EntryTypeName.WIKTIONARY_TRANSLATION_SENSE);
339 englishText.append(" (").append(pos.toLowerCase()).append(")");
341 enIndexBuilder.addEntryWithString(indexedEntry, title, EntryTypeName.WIKTIONARY_TITLE_SINGLE, EntryTypeName.WIKTIONARY_TITLE_MULTI);
343 final Pair pair = new Pair(trim(englishText.toString()), trim(otherText.toString()), swap);
344 pairEntry.pairs.add(pair);
// NOTE(review): the duplicate-check add() happens inside assert, so it runs
// only with -ea enabled; without assertions pairsAdded is never populated.
345 assert (pairsAdded.add(pair.toString()));
// Debug breakpoint hook for one specific pair.
346 if (pair.toString().equals("libero {m} :: free (adjective)")) {
347 System.out.println();
// Compiled once and reused by trim(): matches any run of whitespace.
352 static final Pattern whitespace = Pattern.compile("\\s+");
// Collapses internal whitespace runs to single spaces and strips the ends.
354 static String trim(final String s) {
355 return whitespace.matcher(s).replaceAll(" ").trim();
// Debug-only duplicate tracker: populated solely via an assert in
// doTranslationLine(), so it stays empty unless assertions (-ea) are on.
358 Set<String> pairsAdded = new LinkedHashSet<String>();
360 // -------------------------------------------------------------------------
/**
 * Walks a foreign-language section of a page: part-of-speech headings are handed
 * to doPartOfSpeech(); a "Translations" heading here is unexpected (those belong
 * to English sections) and is logged.
 * NOTE(review): the loop's closing braces are on lines elided from this excerpt.
 */
362 private void doForeignWord(final String title, final String text) {
363 final WikiTokenizer wikiTokenizer = new WikiTokenizer(text);
364 while (wikiTokenizer.nextToken() != null) {
365 if (wikiTokenizer.isHeading()) {
366 final String headingName = wikiTokenizer.headingWikiText();
367 if (headingName.equals("Translations")) {
368 System.err.println("Translations not in English section: " + title);
369 } else if (headingName.equals("Pronunciation")) {
370 //doPronunciation(wikiLineReader);
371 } else if (partOfSpeechHeader.matcher(headingName).matches()) {
372 doPartOfSpeech(title, headingName, wikiTokenizer.headingDepth(), wikiTokenizer);
/**
 * Processes one part-of-speech sub-section of a foreign word: the first template
 * usually restates the headword (e.g. {{it-noun}}), from which the display form
 * ("side") and inflected forms are derived; list items become definition pairs
 * via handleForeignListItem().
 * NOTE(review): the declaration of {@code side}, several braces and fall-through
 * lines are elided from this excerpt.
 */
380 private void doPartOfSpeech(String title, final String posHeading, final int posDepth, WikiTokenizer wikiTokenizer) {
381 System.out.println("***" + title);
382 System.out.println(posHeading);
383 //final StringBuilder foreignBuilder = new StringBuilder();
386 Collection<String> forms = Collections.emptyList();
388 int currentHeadingDepth = posDepth;
389 while (wikiTokenizer.nextToken() != null) {
390 if (wikiTokenizer.isHeading()) {
391 currentHeadingDepth = wikiTokenizer.headingDepth();
// A heading at or above this POS's depth ends the section; rewind for the caller.
393 if (currentHeadingDepth <= posDepth) {
394 wikiTokenizer.returnToLineStart();
399 if (currentHeadingDepth > posDepth) {
404 if (wikiTokenizer.isFunction()) {
405 final String name = wikiTokenizer.functionName();
406 final List<String> args = wikiTokenizer.functionPositionArgs();
407 final Map<String,String> namedArgs = wikiTokenizer.functionNamedArgs();
408 // First line is generally a repeat of the title with some extra information.
409 // We need to build up the left side (foreign text, tokens) separately from the
410 // right side (English). The left-side may get paired with multiple right sides.
411 // The left side should get filed under every form of the word in question (singular, plural).
413 // For verbs, the conjugation comes later on in a deeper section.
414 // Ideally, we'd want to file every English entry with the verb
415 // under every verb form coming from the conjugation.
416 // Ie. under "fa": see: "make :: fare" and "do :: fare"
417 // But then where should we put the conjugation table?
418 // I think just under fare. But then we need a way to link to the entry (actually the row, since entries doesn't show up!)
419 // for the conjugation table from "fa".
420 // Would like to be able to link to a lang#token.
421 if (name.equals("it-noun")) {
422 assert forms.isEmpty();
// {{it-noun|base|gender|sg-suffix|pl-suffix}} — suffixes are appended to base.
423 final String base = get(args, 0);
424 final String gender = get(args, 1);
425 final String singular = base + get(args, 2);
426 final String plural = base + get(args, 3);
// NOTE(review): four arguments for three %s specifiers — the trailing
// duplicate 'plural' is silently ignored by String.format; confirm whether
// the format string was meant to show it.
427 side = String.format("%s {%s}, %s {pl}", singular, gender, plural, plural);
428 forms = Arrays.asList(singular, plural);
429 } else if (name.equals("it-proper noun")) {
431 } else if (name.equals("it-adj")) {
433 } else if (name.startsWith("it-conj")) {
434 if (name.equals("it-conj-are")) {
435 itConjAre(args, namedArgs);
436 } else if (name.equals("it-conj-ere")) {
437 } else if (name.equals("it-conj-ire")) {
439 System.err.println("Unknown conjugation: " + wikiTokenizer.token());
443 System.err.println("Unknown function: " + wikiTokenizer.token());
// Definition lines: file under the headword display form when one was built.
446 } else if (wikiTokenizer.isListItem()) {
447 handleForeignListItem(side != null ? side : title, title, forms, wikiTokenizer);
449 } else if (wikiTokenizer.isWikiLink()) {
/**
 * Fills in default Italian first-conjugation ("-are") verb forms for every slot
 * the template did not supply explicitly: given the verb stem (args[0]) and
 * auxiliary verb (args[1]), derives the regular form for each tense/person key
 * via putIfMissing(), then delegates to the shared itConj() (outside this excerpt).
 * NOTE(review): args.get(0)/get(1) will throw IndexOutOfBoundsException on a
 * malformed template with fewer than two positional args — confirm callers.
 */
457 private void itConjAre(List<String> args, Map<String, String> namedArgs) {
458 final String base = args.get(0);
459 final String aux = args.get(1);
// Non-finite forms.
461 putIfMissing(namedArgs, "inf", base + "are");
462 putIfMissing(namedArgs, "aux", aux);
463 putIfMissing(namedArgs, "ger", base + "ando");
464 putIfMissing(namedArgs, "presp", base + "ante");
465 putIfMissing(namedArgs, "pastp", base + "ato");
// Present indicative.
467 putIfMissing(namedArgs, "pres1s", base + "o");
468 putIfMissing(namedArgs, "pres2s", base + "i");
469 putIfMissing(namedArgs, "pres3s", base + "a");
470 putIfMissing(namedArgs, "pres1p", base + "iamo");
471 putIfMissing(namedArgs, "pres2p", base + "ate");
472 putIfMissing(namedArgs, "pres3p", base + "ano");
// Imperfect.
474 putIfMissing(namedArgs, "imperf1s", base + "avo");
475 putIfMissing(namedArgs, "imperf2s", base + "avi");
476 putIfMissing(namedArgs, "imperf3s", base + "ava");
477 putIfMissing(namedArgs, "imperf1p", base + "avamo");
478 putIfMissing(namedArgs, "imperf2p", base + "avate");
479 putIfMissing(namedArgs, "imperf3p", base + "avano");
// Past historic (passato remoto).
481 putIfMissing(namedArgs, "prem1s", base + "ai");
482 putIfMissing(namedArgs, "prem2s", base + "asti");
483 putIfMissing(namedArgs, "prem3s", base + "ò");
484 putIfMissing(namedArgs, "prem1p", base + "ammo");
485 putIfMissing(namedArgs, "prem2p", base + "aste");
486 putIfMissing(namedArgs, "prem3p", base + "arono");
// Future.
488 putIfMissing(namedArgs, "fut1s", base + "erò");
489 putIfMissing(namedArgs, "fut2s", base + "erai");
490 putIfMissing(namedArgs, "fut3s", base + "erà");
491 putIfMissing(namedArgs, "fut1p", base + "eremo");
492 putIfMissing(namedArgs, "fut2p", base + "erete");
493 putIfMissing(namedArgs, "fut3p", base + "eranno");
// Conditional.
495 putIfMissing(namedArgs, "cond1s", base + "erei");
496 putIfMissing(namedArgs, "cond2s", base + "eresti");
497 putIfMissing(namedArgs, "cond3s", base + "erebbe");
498 putIfMissing(namedArgs, "cond1p", base + "eremmo");
499 putIfMissing(namedArgs, "cond2p", base + "ereste");
500 putIfMissing(namedArgs, "cond3p", base + "erebbero");
501 // Subjunctive / congiuntivo
502 putIfMissing(namedArgs, "sub123s", base + "i");
503 putIfMissing(namedArgs, "sub1p", base + "iamo");
504 putIfMissing(namedArgs, "sub2p", base + "iate");
505 putIfMissing(namedArgs, "sub3p", base + "ino");
506 // Imperfect subjunctive
507 putIfMissing(namedArgs, "impsub12s", base + "assi");
508 putIfMissing(namedArgs, "impsub3s", base + "asse");
509 putIfMissing(namedArgs, "impsub1p", base + "assimo");
510 putIfMissing(namedArgs, "impsub2p", base + "aste");
511 putIfMissing(namedArgs, "impsub3p", base + "assero");
// Imperative.
513 putIfMissing(namedArgs, "imp2s", base + "a");
514 putIfMissing(namedArgs, "imp3s", base + "i");
515 putIfMissing(namedArgs, "imp1p", base + "iamo");
516 putIfMissing(namedArgs, "imp2p", base + "ate");
517 putIfMissing(namedArgs, "imp3p", base + "ino");
// Hand the completed form map to the generic conjugation handler.
520 itConj(args, namedArgs);
/**
 * Stores {@code value} under {@code key} only when the map has no entry for it
 * or the existing entry is the empty string — explicit template arguments win
 * over derived defaults.
 */
524 private void putIfMissing(final Map<String, String> namedArgs, final String key,
525 final String value) {
526 final String oldValue = namedArgs.get(key);
527 if (oldValue == null || oldValue.length() == 0) {
528 namedArgs.put(key, value);
532 // TODO: check how ='' and =| are manifested....
/**
 * Like putIfMissing, but an explicit empty-quote value ('') in the template is
 * normalized to the empty string (meaning "form intentionally absent").
 * NOTE(review): a line between the two branches is elided from this excerpt
 * (presumably a return or the if/else join) — confirm the control flow; as
 * visible, the second branch relies on oldValue being non-null.
 */
534 private void putOrNullify(final Map<String, String> namedArgs, final String key,
535 final String value) {
536 final String oldValue = namedArgs.get(key);
537 if (oldValue == null/* || oldValue.length() == 0*/) {
538 namedArgs.put(key, value);
540 if (oldValue.equals("''")) {
541 namedArgs.put(key, "");
// Scratch buffers reused across handleForeignListItem() calls to gather one
// list item plus its nested sub-items (cleared at the start of each call).
546 final List<String> listPrefixes = new ArrayList<String>();
547 final List<String> listLines = new ArrayList<String>();
// Wiki-link texts matching this pattern are grammatical boilerplate
// ("third-person singular", ...) and are shown but not indexed.
// NOTE(review): the pattern literal continues on lines outside this excerpt.
549 static final Pattern UNINDEXED_WIKI_TEXT = Pattern.compile(
550 "(first|second|third)-person (singular|plural)|" +
/**
 * Converts one foreign-word definition list item (plus its nested sub-items)
 * into a PairEntry: the main line's wikitext is rendered into English text and
 * indexed, then paired with the foreign headword; the pair is also filed under
 * every inflected form in {@code forms}.
 * NOTE(review): this method continues past the end of the visible excerpt;
 * several braces and else-branches in between are elided as well.
 */
555 private void handleForeignListItem(final String foreignText, String title, final Collection<String> forms, final WikiTokenizer wikiTokenizer) {
557 final String prefix = wikiTokenizer.listItemPrefix();
558 if (prefix.length() > 1) {
559 System.err.println("Prefix too long: " + wikiTokenizer.token());
563 listPrefixes.clear();
565 listPrefixes.add(prefix);
566 listLines.add(wikiTokenizer.listItemWikiText());
// Gather deeper-nested sub-items belonging to this definition.
// NOTE(review): '&&' binds tighter than '||', so this condition parses as
// ((nextToken() != null && isNewline()) || isComment() || (...)) — after the
// tokenizer is exhausted the is*() calls are still evaluated; confirm that is
// the intended grouping and that is*() is safe post-EOF.
567 while(wikiTokenizer.nextToken() != null &&
568 wikiTokenizer.isNewline() ||
569 wikiTokenizer.isComment() ||
570 (wikiTokenizer.isListItem() &&
571 wikiTokenizer.listItemPrefix().length() > prefix.length() &&
572 wikiTokenizer.listItemPrefix().startsWith(prefix))) {
573 if (wikiTokenizer.isListItem()) {
574 listPrefixes.add(wikiTokenizer.listItemPrefix());
575 listLines.add(wikiTokenizer.listItemWikiText());
// Push back the token that terminated the sub-item scan.
578 if (wikiTokenizer.nextToken() != null) {
579 wikiTokenizer.returnToLineStart();
581 System.out.println("list lines: " + listLines);
582 System.out.println("list prefixes: " + listPrefixes);
584 final PairEntry pairEntry = new PairEntry();
585 final IndexedEntry indexedEntry = new IndexedEntry(pairEntry);
587 final String foreign = trim(title);
589 final StringBuilder englishBuilder = new StringBuilder();
// Only the first (main) line is rendered into the English side here.
591 final String mainLine = listLines.get(0);
593 final WikiTokenizer englishTokenizer = new WikiTokenizer(mainLine);
594 while (englishTokenizer.nextToken() != null) {
595 // TODO handle form of....
596 if (englishTokenizer.isPlainText()) {
597 englishBuilder.append(englishTokenizer.token());
598 enIndexBuilder.addEntryWithString(indexedEntry, englishTokenizer.token(), EntryTypeName.WIKTIONARY_ENGLISH_DEF);
599 } else if (englishTokenizer.isWikiLink()) {
600 final String text = englishTokenizer.wikiLinkText();
601 final String link = englishTokenizer.wikiLinkDest();
// Route link text to the index matching its target language anchor.
603 if (link.contains("#English")) {
604 englishBuilder.append(text);
605 enIndexBuilder.addEntryWithString(indexedEntry, text, EntryTypeName.WIKTIONARY_ENGLISH_DEF_WIKI_LINK);
606 } else if (link.contains("#") && this.langPattern.matcher(link).find()) {
607 englishBuilder.append(text);
608 otherIndexBuilder.addEntryWithString(indexedEntry, text, EntryTypeName.WIKTIONARY_ENGLISH_DEF_OTHER_LANG);
610 System.err.println("Special link: " + englishTokenizer.token());
611 // TODO: something here...
// Anchor-less links: show the text, index it unless it is grammatical boilerplate.
615 englishBuilder.append(text);
616 if (!UNINDEXED_WIKI_TEXT.matcher(text).find()) {
617 enIndexBuilder.addEntryWithString(indexedEntry, text, EntryTypeName.WIKTIONARY_ENGLISH_DEF_WIKI_LINK);
620 } else if (englishTokenizer.isFunction()) {
621 final String name = englishTokenizer.functionName();
622 if (name.contains(" conjugation of ") ||
623 name.contains(" form of ") ||
624 name.contains(" feminine of ") ||
625 name.contains(" plural of ")) {
626 // Ignore these in the index, they're really annoying....
627 englishBuilder.append(englishTokenizer.token());
629 System.err.println("Unexpected function: " + englishTokenizer.token());
632 if (englishTokenizer.isComment() || englishTokenizer.isMarkup()) {
634 System.err.println("Unexpected definition text: " + englishTokenizer.token());
// Only emit a pair when some English text was actually produced.
638 final String english = trim(englishBuilder.toString());
639 if (english.length() > 0) {
640 final Pair pair = new Pair(english, trim(foreignText), this.swap);
641 pairEntry.pairs.add(pair);
642 otherIndexBuilder.addEntryWithString(indexedEntry, title, EntryTypeName.WIKTIONARY_TITLE_SINGLE, EntryTypeName.WIKTIONARY_TITLE_MULTI);
643 for (final String form : forms) {
644 otherIndexBuilder.addEntryWithString(indexedEntry, form, EntryTypeName.WIKTIONARY_FORM_SINGLE, EntryTypeName.WIKTIONARY_FORM_MULTI);