1 // Copyright 2011 Google Inc. All Rights Reserved.
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
7 // http://www.apache.org/licenses/LICENSE-2.0
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
15 package com.hughes.android.dictionary.parser;
17 import java.io.BufferedInputStream;
18 import java.io.DataInputStream;
19 import java.io.EOFException;
21 import java.io.FileInputStream;
22 import java.io.IOException;
23 import java.util.ArrayList;
24 import java.util.Arrays;
25 import java.util.Collection;
26 import java.util.LinkedHashSet;
27 import java.util.List;
30 import java.util.logging.Logger;
31 import java.util.regex.Pattern;
33 import com.hughes.android.dictionary.engine.EntryTypeName;
34 import com.hughes.android.dictionary.engine.IndexBuilder;
35 import com.hughes.android.dictionary.engine.IndexedEntry;
36 import com.hughes.android.dictionary.engine.PairEntry;
37 import com.hughes.android.dictionary.engine.PairEntry.Pair;
// Parses a preprocessed English-Wiktionary dump into bilingual PairEntry
// records, indexing both the English and the foreign-language side.
// NOTE(review): this listing is a sampled excerpt — many original lines are
// absent, so several method bodies below are syntactically incomplete here.
public class EnWiktionaryXmlParser {

  static final Logger LOG = Logger.getLogger(EnWiktionaryXmlParser.class.getName());

  // TODO: process {{ttbc}} lines
45 static final Pattern partOfSpeechHeader = Pattern.compile(
46 "Noun|Verb|Adjective|Adverb|Pronoun|Conjunction|Interjection|" +
47 "Preposition|Proper noun|Article|Prepositional phrase|Acronym|" +
48 "Abbreviation|Initialism|Contraction|Prefix|Suffix|Symbol|Letter|" +
49 "Ligature|Idiom|Phrase|" +
50 // These are @deprecated:
51 "Noun form|Verb form|Adjective form|Nominal phrase|Noun phrase|" +
52 "Verb phrase|Transitive verb|Intransitive verb|Reflexive verb|" +
53 // These are extras I found:
54 "Determiner|Numeral|Number|Cardinal number|Ordinal number|Proverb|" +
55 "Particle|Interjection|Pronominal adverb" +
56 "Han character|Hanzi|Hanja|Kanji|Katakana character|Syllable");
  // Index receiving English-side tokens.
  final IndexBuilder enIndexBuilder;
  // Index receiving foreign-language-side tokens.
  final IndexBuilder otherIndexBuilder;
  // Matches the target language's heading name (e.g. "Italian") — see parseSection.
  final Pattern langPattern;
  // Presumably matches the target language's ISO code in {{t|..}} templates;
  // only assigned, never read, in the visible lines — TODO confirm usage.
  final Pattern langCodePattern;
  // Wires the two index builders and the language-matching patterns.
  // NOTE(review): the 'swap' parameter has no visible 'this.swap = swap'
  // assignment in this listing, yet a 'swap' field is read later when
  // constructing Pairs — confirm the assignment exists in the omitted lines.
  public EnWiktionaryXmlParser(final IndexBuilder enIndexBuilder, final IndexBuilder otherIndexBuilder, final Pattern langPattern, final Pattern langCodePattern, final boolean swap) {
    this.enIndexBuilder = enIndexBuilder;
    this.otherIndexBuilder = otherIndexBuilder;
    this.langPattern = langPattern;
    this.langCodePattern = langCodePattern;
  // Reads the preprocessed dump: a stream of (title, heading, text) records
  // written with DataOutputStream (readUTF strings plus a length-prefixed
  // UTF-8 byte block), dispatching each record to parseSection.
  // Stops early when pageLimit >= 0 and pageCount reaches it.
  // NOTE(review): the try/catch framing, the readFully of 'bytes', the loop
  // braces, and the pageCount increment are in lines omitted from this listing.
  public void parse(final File file, final int pageLimit) throws IOException {
    final DataInputStream dis = new DataInputStream(new BufferedInputStream(new FileInputStream(file)));
    if (pageLimit >= 0 && pageCount >= pageLimit) {
    // EOF while reading the title marks the end of the record stream.
    title = dis.readUTF();
    } catch (EOFException e) {
    final String heading = dis.readUTF();
    final int bytesLength = dis.readInt();
    final byte[] bytes = new byte[bytesLength];
    // "UTF8" is the legacy JDK alias for UTF-8 (StandardCharsets.UTF_8 would be safer).
    final String text = new String(bytes, "UTF8");
    parseSection(title, heading, text);
    // Progress logging every 1000 pages.
    if (pageCount % 1000 == 0) {
    LOG.info("pageCount=" + pageCount);
  // Routes one dump section: skips non-article namespaces, strips the '='
  // markup from the heading, then dispatches English sections to
  // doEnglishWord and target-language sections to doForeignWord.
  // NOTE(review): the early 'return' after the namespace check and the
  // closing braces are in lines omitted from this listing.
  private void parseSection(final String title, String heading, final String text) {
    // Skip meta/namespace pages entirely — they are not dictionary entries.
    if (title.startsWith("Wiktionary:") ||
        title.startsWith("Template:") ||
        title.startsWith("Appendix:") ||
        title.startsWith("Category:") ||
        title.startsWith("Index:") ||
        title.startsWith("MediaWiki:") ||
        title.startsWith("TransWiki:") ||
        title.startsWith("Citations:") ||
        title.startsWith("Concordance:") ||
        title.startsWith("Help:")) {
    // Headings arrive as "==English==" — drop the '=' markers.
    heading = heading.replaceAll("=", "").trim();
    if (heading.equals("English")) {
      doEnglishWord(title, text);
    } else if (langPattern.matcher(heading).matches()){
      doForeignWord(title, text);
126 // -------------------------------------------------------------------------
  // Walks an English section, tracking the most recent part-of-speech
  // heading ('pos'/'posDepth'), and hands each "Translations" subsection to
  // doTranslations together with that POS.
  // NOTE(review): the declarations of 'pos' and 'posDepth', the reset logic
  // when leaving a POS subtree, and several braces are in omitted lines.
  private void doEnglishWord(String title, String text) {
    final WikiTokenizer wikiTokenizer = new WikiTokenizer(text);
    while (wikiTokenizer.nextToken() != null) {
      if (wikiTokenizer.isHeading()) {
        final String headerName = wikiTokenizer.headingWikiText();
        // Leaving the current POS subtree when the heading is as shallow or shallower.
        if (wikiTokenizer.headingDepth() <= posDepth) {
        if (partOfSpeechHeader.matcher(headerName).matches()) {
          posDepth = wikiTokenizer.headingDepth();
          pos = wikiTokenizer.headingWikiText();
        } else if (headerName.equals("Translations")) {
          // Translations should always appear under a POS heading.
          LOG.warning("Translations without POS: " + title);
          doTranslations(title, wikiTokenizer, pos);
        } else if (headerName.equals("Pronunciation")) {
          //doPronunciation(wikiLineReader);
  // Template names that merely wrap text in a script/encoding (e.g.
  // {{Cyrl|слово}}); for these the first positional argument is emitted
  // directly as translation text in doTranslationLine.
  private static Set<String> encodings = new LinkedHashSet<String>(Arrays.asList("zh-ts",
      "sd-Arab", "ku-Arab", "Arab", "unicode", "Laoo", "ur-Arab", "Thai",
      "fa-Arab", "Khmr", "zh-tsp", "Cyrl", "IPAchar", "ug-Arab", "ko-inline",
      "Jpan", "Kore", "Hebr", "rfscript", "Beng", "Mong", "Knda", "Cyrs",
      "yue-tsj", "Mlym", "Tfng", "Grek", "yue-yue-j"));
  // Scans a "Translations" subsection: tracks the current sense from
  // {{trans-top|...}}, and for each language list item whose language
  // matches langPattern, forwards the text after the colon to
  // doTranslationLine. Stops at the next heading.
  // NOTE(review): the 'sense' declaration, loop 'continue'/'break' lines,
  // and many closing braces are in lines omitted from this listing.
  private void doTranslations(final String title, final WikiTokenizer wikiTokenizer, final String pos) {
    // NOTE(review): leftover debugging hook — consider removing.
    if (title.equals("absolutely")) {
      System.out.println();
    boolean done = false;
    while (wikiTokenizer.nextToken() != null) {
      // A new heading ends the translations subsection.
      if (wikiTokenizer.isHeading()) {
        wikiTokenizer.returnToLineStart();
      // Check whether we care about this line:
      //line = WikiLineReader.removeSquareBrackets(line);
      if (wikiTokenizer.isFunction()) {
        final String functionName = wikiTokenizer.functionName();
        final List<String> positionArgs = wikiTokenizer.functionPositionArgs();
        if (functionName.equals("trans-top")) {
          // {{trans-top|<sense>}} — capture the gloss for subsequent lines.
          if (wikiTokenizer.functionPositionArgs().size() >= 1) {
            sense = positionArgs.get(0);
            // TODO: could emphasize words in [[brackets]] inside sense.
            sense = WikiTokenizer.toPlainText(sense);
            //LOG.info("Sense: " + sense);
        } else if (functionName.equals("trans-bottom")) {
        } else if (functionName.equals("trans-mid")) {
        } else if (functionName.equals("trans-see")) {
          // TODO: would also be nice...
        } else if (functionName.startsWith("picdic")) {
        } else if (functionName.startsWith("checktrans")) {
        } else if (functionName.startsWith("ttbc")) {
          wikiTokenizer.nextLine();
          // TODO: would be great to handle ttbc
          // TODO: Check this: done = true;
          LOG.warning("Unexpected translation wikifunction: " + wikiTokenizer.token() + ", title=" + title);
      } else if (wikiTokenizer.isListItem()) {
        final String line = wikiTokenizer.listItemWikiText();
        // This line could produce an output...
        // First strip the language and check whether it matches.
        // And hold onto it for sub-lines.
        final int colonIndex = line.indexOf(":");
        if (colonIndex == -1) {
        final String lang = line.substring(0, colonIndex);
        // Only keep lines for the language this parser targets.
        if (!this.langPattern.matcher(lang).find()) {
        String rest = line.substring(colonIndex + 1).trim();
        if (rest.length() > 0) {
          doTranslationLine(line, title, pos, sense, rest);
        // TODO: do lines that are like "Greek:"
      } else if (wikiTokenizer.remainderStartsWith("''See''")) {
        wikiTokenizer.nextLine();
        LOG.fine("Skipping line: " + wikiTokenizer.token());
      } else if (wikiTokenizer.isWikiLink()) {
        final String wikiLink = wikiTokenizer.wikiLinkText();
        // Interwiki/self links and category links are expected noise.
        if (wikiLink.contains(":") && wikiLink.contains(title)) {
        } else if (wikiLink.contains("Category:")) {
          LOG.warning("Unexpected wikiLink: " + wikiTokenizer.token() + ", title=" + title);
      } else if (wikiTokenizer.isNewline() || wikiTokenizer.isMarkup() || wikiTokenizer.isComment()) {
        final String token = wikiTokenizer.token();
        // "----" is the horizontal rule separating language sections.
        if (token.equals("----")) {
          LOG.warning("Unexpected translation token: " + wikiTokenizer.token() + ", title=" + title);
  // Bounds-safe list access: returns list.get(index), or null when the index
  // is past the end (used for optional positional template arguments).
  private static <T> T get(final List<T> list, final int index) {
    return index < list.size() ? list.get(index) : null;
  // Converts one "* Language: {{t|...}} ..." translation line into a
  // PairEntry pair: accumulates the foreign text (with gender and
  // transliteration decorations) into otherText, indexes each token, then
  // pairs it with "title (sense) (pos)" on the English side.
  // NOTE(review): several 'return'/'continue' lines and closing braces are
  // in lines omitted from this listing.
  private void doTranslationLine(final String line, final String title, final String pos, final String sense, final String rest) {
    // Good chance we'll actually file this one...
    final PairEntry pairEntry = new PairEntry();
    final IndexedEntry indexedEntry = new IndexedEntry(pairEntry);

    final StringBuilder otherText = new StringBuilder();
    final WikiTokenizer wikiTokenizer = new WikiTokenizer(rest, false);
    while (wikiTokenizer.nextToken() != null) {
      if (wikiTokenizer.isPlainText()) {
        final String plainText = wikiTokenizer.token();
        // NOTE(review): append("") is a no-op — possibly a mangled separator.
        otherText.append("").append(plainText);
        otherIndexBuilder.addEntryWithString(indexedEntry, plainText, EntryTypeName.WIKTIONARY_TRANSLATION_OTHER_TEXT);
      } else if (wikiTokenizer.isWikiLink()) {
        final String plainText = wikiTokenizer.wikiLinkText();
        otherText.append("").append(plainText);
        otherIndexBuilder.addEntryWithString(indexedEntry, plainText, EntryTypeName.WIKTIONARY_TRANSLATION_WIKI_TEXT);
      } else if (wikiTokenizer.isFunction()) {
        final String functionName = wikiTokenizer.functionName();
        final List<String> args = wikiTokenizer.functionPositionArgs();
        final Map<String,String> namedArgs = wikiTokenizer.functionNamedArgs();
        // {{t}}-family: args are (langCode, word[, gender]), named arg tr=transliteration.
        if (functionName.equals("t") || functionName.equals("t+") || functionName.equals("t-") || functionName.equals("tø") || functionName.equals("apdx-t")) {
          if (args.size() < 2) {
            LOG.warning("{{t}} with too few args: " + line + ", title=" + title);
          // NOTE(review): langCode is extracted but unused in the visible lines.
          final String langCode = get(args, 0);
          //if (this.langCodePattern.matcher(langCode).matches()) {
          final String word = get(args, 1);
          final String gender = get(args, 2);
          final String transliteration = namedArgs.get("tr");
          if (otherText.length() > 0) {
            otherText.append("");
          otherText.append(word);
          otherIndexBuilder.addEntryWithString(indexedEntry, word, EntryTypeName.WIKTIONARY_TITLE_SINGLE, EntryTypeName.WIKTIONARY_TITLE_MULTI);
          if (gender != null) {
            otherText.append(String.format(" {%s}", gender));
          if (transliteration != null) {
            otherText.append(String.format(" (tr. %s)", transliteration));
            otherIndexBuilder.addEntryWithString(indexedEntry, transliteration, EntryTypeName.WIKTIONARY_TRANSLITERATION);
        } else if (functionName.equals("qualifier")) {
          // Qualifiers are rendered parenthesized, e.g. "(informal)".
          if (args.size() == 0) {
            otherText.append(wikiTokenizer.token());
          String qualifier = args.get(0);
          if (!namedArgs.isEmpty() || args.size() > 1) {
            LOG.warning("weird qualifier: " + line);
          otherText.append("(").append(qualifier).append(")");
        } else if (encodings.contains(functionName)) {
          // Script-wrapper template: emit its first arg as plain text.
          otherText.append("").append(args.get(0));
          otherIndexBuilder.addEntryWithString(indexedEntry, args.get(0), EntryTypeName.WIKTIONARY_TRANSLATION_OTHER_TEXT);
        } else if (isGender(functionName)) {
          appendGender(otherText, functionName, args);
        } else if (functionName.equals("g")) {
          otherText.append("{g}");
        } else if (functionName.equals("l")) {
          // encodes text in various langs.
          otherText.append("").append(args.get(1));
          otherIndexBuilder.addEntryWithString(indexedEntry, args.get(1), EntryTypeName.WIKTIONARY_TRANSLATION_OTHER_TEXT);
          // TODO: transliteration
        } else if (functionName.equals("term")) {
          // cross-reference to another dictionary
          otherText.append("").append(args.get(0));
          otherIndexBuilder.addEntryWithString(indexedEntry, args.get(0), EntryTypeName.WIKTIONARY_TRANSLATION_OTHER_TEXT);
          // TODO: transliteration
        } else if (functionName.equals("italbrac") || functionName.equals("gloss")) {
          // TODO: put this text aside to use it.
          otherText.append("[").append(args.get(0)).append("]");
          otherIndexBuilder.addEntryWithString(indexedEntry, args.get(0), EntryTypeName.WIKTIONARY_TRANSLATION_OTHER_TEXT);
        } else if (functionName.equals("ttbc")) {
          LOG.warning("Unexpected {{ttbc}}");
        } else if (functionName.equals("trreq")) {
        } else if (functionName.equals("not used")) {
          otherText.append("(not used)");
        } else if (functionName.equals("t-image")) {
          // American sign language
          // Unknown template: keep its raw wikitext so nothing is lost.
          otherText.append(wikiTokenizer.token());
      } else if (wikiTokenizer.isNewline()) {
      } else if (wikiTokenizer.isComment()) {
      } else if (wikiTokenizer.isMarkup()) {
        LOG.warning("Bad translation token: " + wikiTokenizer.token());
    if (otherText.length() == 0) {
      LOG.warning("Empty otherText: " + line);

    // Build the English side: "title (sense) (pos)".
    StringBuilder englishText = new StringBuilder();
    englishText.append(title);
    englishText.append(" (").append(sense).append(")");
    enIndexBuilder.addEntryWithString(indexedEntry, sense, EntryTypeName.WIKTIONARY_TRANSLATION_SENSE, EntryTypeName.WIKTIONARY_TRANSLATION_SENSE);
    englishText.append(" (").append(pos.toLowerCase()).append(")");
    enIndexBuilder.addEntryWithString(indexedEntry, title, EntryTypeName.WIKTIONARY_TITLE_SINGLE, EntryTypeName.WIKTIONARY_TITLE_MULTI);

    final Pair pair = new Pair(trim(englishText.toString()), trim(otherText.toString()), swap);
    pairEntry.pairs.add(pair);
    // Track rendered pairs to warn about duplicates across lines.
    if (!pairsAdded.add(pair.toString())) {
      LOG.warning("Duplicate pair: " + pair.toString());
    // NOTE(review): leftover debugging hook — consider removing.
    if (pair.toString().equals("libero {m} :: free (adjective)")) {
      System.out.println();
  // Appends a gender/number annotation "{name|arg1|arg2...}" (e.g. "{m}",
  // "{f|p}") to otherText.
  // NOTE(review): the loop's and method's closing braces are in omitted lines.
  private void appendGender(final StringBuilder otherText,
      final String functionName, final List<String> args) {
    otherText.append("{");
    otherText.append(functionName);
    for (int i = 0; i < args.size(); ++i) {
      otherText.append("|").append(args.get(i));
    otherText.append("}");
  // True for the bare gender/number templates {{m}}, {{f}}, {{n}}, {{p}}.
  private boolean isGender(final String functionName) {
    return functionName.equals("m") || functionName.equals("f") || functionName.equals("n") || functionName.equals("p");
  // Rendered pair strings already emitted; used only to warn on duplicates.
  Set<String> pairsAdded = new LinkedHashSet<String>();
407 // -------------------------------------------------------------------------
  // Walks a target-language section and hands each part-of-speech heading to
  // doForeignPartOfSpeech. A "Translations" heading here is unexpected
  // (translations live in the English section) and only logged.
  // NOTE(review): closing braces are in lines omitted from this listing.
  private void doForeignWord(final String title, final String text) {
    final WikiTokenizer wikiTokenizer = new WikiTokenizer(text);
    while (wikiTokenizer.nextToken() != null) {
      if (wikiTokenizer.isHeading()) {
        final String headingName = wikiTokenizer.headingWikiText();
        if (headingName.equals("Translations")) {
          LOG.warning("Translations not in English section: " + title);
        } else if (headingName.equals("Pronunciation")) {
          //doPronunciation(wikiLineReader);
        } else if (partOfSpeechHeader.matcher(headingName).matches()) {
          doForeignPartOfSpeech(title, headingName, wikiTokenizer.headingDepth(), wikiTokenizer);
  // One definition list item ("#...") plus its more deeply nested sub-lines
  // ("#:", "#::", ...) collected as parallel prefix/line lists.
  // NOTE(review): constructor/class closing braces and the @Override marker
  // for toString (if any) are in lines omitted from this listing.
  static final class ListSection {
    final String firstPrefix;
    final String firstLine;
    final List<String> nextPrefixes = new ArrayList<String>();
    final List<String> nextLines = new ArrayList<String>();

    public ListSection(String firstPrefix, String firstLine) {
      this.firstPrefix = firstPrefix;
      this.firstLine = firstLine;

    public String toString() {
      return firstPrefix + firstLine + "{ " + nextPrefixes + "}";
  // Running count of foreign POS sections processed (progress logging only).
  int foreignCount = 0;

  // Processes one part-of-speech section of a foreign word: builds the
  // left-hand ("foreign") text from the headword templates, collects the
  // word's inflected forms, gathers the definition list items into
  // ListSections, and finally files each list item via doForeignListItem.
  // NOTE(review): several 'break'/'continue' lines and closing braces are in
  // lines omitted from this listing.
  private void doForeignPartOfSpeech(String title, final String posHeading, final int posDepth, WikiTokenizer wikiTokenizer) {
    if (++foreignCount % 1000 == 0) {
      LOG.info("***" + title + ", pos=" + posHeading + ", foreignCount=" + foreignCount);
    // NOTE(review): leftover debugging hook — consider removing.
    if (title.equals("moro")) {
      System.out.println();

    final StringBuilder foreignBuilder = new StringBuilder();
    final Collection<String> wordForms = new ArrayList<String>();
    final List<ListSection> listSections = new ArrayList<ListSection>();

    ListSection lastListSection = null;

    int currentHeadingDepth = posDepth;
    while (wikiTokenizer.nextToken() != null) {
      if (wikiTokenizer.isHeading()) {
        currentHeadingDepth = wikiTokenizer.headingDepth();
        // A heading at or above the POS level ends this section.
        if (currentHeadingDepth <= posDepth) {
          wikiTokenizer.returnToLineStart();

      // Deeper sub-sections (conjugation tables, etc.) are skipped for now.
      if (currentHeadingDepth > posDepth) {
        // TODO: deal with other neat info sections

      if (wikiTokenizer.isFunction()) {
        final String name = wikiTokenizer.functionName();
        final List<String> args = wikiTokenizer.functionPositionArgs();
        final Map<String,String> namedArgs = wikiTokenizer.functionNamedArgs();

        // First line is generally a repeat of the title with some extra information.
        // We need to build up the left side (foreign text, tokens) separately from the
        // right side (English). The left-side may get paired with multiple right sides.
        // The left side should get filed under every form of the word in question (singular, plural).

        // For verbs, the conjugation comes later on in a deeper section.
        // Ideally, we'd want to file every English entry with the verb
        // under every verb form coming from the conjugation.
        // Ie. under "fa": see: "make :: fare" and "do :: fare"
        // But then where should we put the conjugation table?
        // I think just under fare. But then we need a way to link to the entry (actually the row, since entries doesn't show up!)
        // for the conjugation table from "fa".
        // Would like to be able to link to a lang#token.
        if (isGender(name)) {
          appendGender(foreignBuilder, name, args);
        } else if (name.equals("wikipedia")) {
          namedArgs.remove("lang");
          if (args.size() > 1 || !namedArgs.isEmpty()) {
          foreignBuilder.append(wikiTokenizer.token());
          } else if (args.size() == 1) {
          foreignBuilder.append(wikiTokenizer.token());
          //foreignBuilder.append(title);
        } else if (name.equals("it-noun")) {
          // {{it-noun|base|gender|sg-suffix|pl-suffix}} — reconstruct both forms.
          final String base = get(args, 0);
          final String gender = get(args, 1);
          final String singular = base + get(args, 2);
          final String plural = base + get(args, 3);
          // NOTE(review): format has 3 specifiers but 4 arguments; the extra
          // 'plural' is silently ignored by String.format.
          foreignBuilder.append(String.format(" %s {%s}, %s {pl}", singular, gender, plural, plural));
          wordForms.add(singular);
          wordForms.add(plural);
        } else if (name.equals("it-proper noun")) {
          foreignBuilder.append(wikiTokenizer.token());
        } else if (name.equals("it-adj")) {
          foreignBuilder.append(wikiTokenizer.token());
        } else if (name.startsWith("it-conj")) {
          if (name.equals("it-conj-are")) {
            itConjAre(args, namedArgs);
          } else if (name.equals("it-conj-ere")) {
          } else if (name.equals("it-conj-ire")) {
            LOG.warning("Unknown conjugation: " + wikiTokenizer.token());
          // Unknown template: keep its raw wikitext so nothing is lost.
          foreignBuilder.append(wikiTokenizer.token());
          // LOG.warning("Unknown function: " + wikiTokenizer.token());

      } else if (wikiTokenizer.isListItem()) {
        final String prefix = wikiTokenizer.listItemPrefix();
        // A strictly longer prefix extending the last one is a sub-line.
        if (lastListSection != null &&
            prefix.startsWith(lastListSection.firstPrefix) &&
            prefix.length() > lastListSection.firstPrefix.length()) {
          lastListSection.nextPrefixes.add(prefix);
          lastListSection.nextLines.add(wikiTokenizer.listItemWikiText());
          lastListSection = new ListSection(prefix, wikiTokenizer.listItemWikiText());
          listSections.add(lastListSection);
      } else if (lastListSection != null) {
        // Don't append anything after the lists, because there's crap.
      } else if (wikiTokenizer.isWikiLink()) {
        // Headings maybe?
        foreignBuilder.append(wikiTokenizer.wikiLinkText());
      } else if (wikiTokenizer.isPlainText()) {
        foreignBuilder.append(wikiTokenizer.token());
      } else if (wikiTokenizer.isMarkup() || wikiTokenizer.isNewline() || wikiTokenizer.isComment()) {
        LOG.warning("Unexpected token: " + wikiTokenizer.token());

    // Here's where we exit.
    // Should we make an entry even if there are no foreign list items?
    String foreign = foreignBuilder.toString().trim();
    // Ensure the headword itself leads the foreign text.
    if (!foreign.toLowerCase().startsWith(title.toLowerCase())) {
      foreign = title + " " + foreign;
    for (final ListSection listSection : listSections) {
      doForeignListItem(foreign, title, wordForms, listSection);
  // Wiki-link texts matching this are inflection boilerplate and are not
  // added to the English index.
  // NOTE(review): the pattern continues on lines omitted from this listing —
  // this declaration is incomplete here.
  static final Pattern UNINDEXED_WIKI_TEXT = Pattern.compile(
      "(first|second|third)-person (singular|plural)|" +
  // Files one definition (ListSection) of a foreign word: tokenizes its
  // first line into English text (indexing links/plain text), pairs it with
  // the foreign headword text, then converts the "#:"-style sub-lines into
  // example-sentence pairs split on an em-dash or " - ".
  // NOTE(review): 'return'/'continue' lines, the 'mdashLen' bookkeeping, and
  // many closing braces are in lines omitted from this listing.
  private void doForeignListItem(final String foreignText, String title, final Collection<String> forms, final ListSection listSection) {
    final String prefix = listSection.firstPrefix;
    if (prefix.length() > 1) {
      // Could just get looser and say that any prefix longer than first is a sublist.
      LOG.warning("Prefix too long: " + listSection);

    final PairEntry pairEntry = new PairEntry();
    final IndexedEntry indexedEntry = new IndexedEntry(pairEntry);

    final StringBuilder englishBuilder = new StringBuilder();

    final String mainLine = listSection.firstLine;

    final WikiTokenizer englishTokenizer = new WikiTokenizer(mainLine, false);
    while (englishTokenizer.nextToken() != null) {
      // TODO handle form of....
      if (englishTokenizer.isPlainText()) {
        englishBuilder.append(englishTokenizer.token());
        enIndexBuilder.addEntryWithString(indexedEntry, englishTokenizer.token(), EntryTypeName.WIKTIONARY_ENGLISH_DEF);
      } else if (englishTokenizer.isWikiLink()) {
        final String text = englishTokenizer.wikiLinkText();
        final String link = englishTokenizer.wikiLinkDest();
        // Classify the link destination to pick the right index bucket.
        if (link.contains("#English")) {
          englishBuilder.append(text);
          enIndexBuilder.addEntryWithString(indexedEntry, text, EntryTypeName.WIKTIONARY_ENGLISH_DEF_WIKI_LINK);
        } else if (link.contains("#") && this.langPattern.matcher(link).find()) {
          englishBuilder.append(text);
          otherIndexBuilder.addEntryWithString(indexedEntry, text, EntryTypeName.WIKTIONARY_ENGLISH_DEF_OTHER_LANG);
        } else if (link.equals("plural")) {
          englishBuilder.append(text);
          //LOG.warning("Special link: " + englishTokenizer.token());
          enIndexBuilder.addEntryWithString(indexedEntry, text, EntryTypeName.WIKTIONARY_ENGLISH_DEF_WIKI_LINK);
          englishBuilder.append(text);
          // Plain [[link]] with no '#': index unless it is inflection boilerplate.
          englishBuilder.append(text);
          if (!UNINDEXED_WIKI_TEXT.matcher(text).find()) {
            enIndexBuilder.addEntryWithString(indexedEntry, text, EntryTypeName.WIKTIONARY_ENGLISH_DEF_WIKI_LINK);
      } else if (englishTokenizer.isFunction()) {
        final String name = englishTokenizer.functionName();
        if (name.contains("conjugation of ") ||
            name.contains("form of ") ||
            name.contains("feminine of ") ||
            name.contains("plural of ")) {
          // Ignore these in the index, they're really annoying....
          englishBuilder.append(englishTokenizer.token());
          englishBuilder.append(englishTokenizer.token());
          // LOG.warning("Unexpected function: " + englishTokenizer.token());
        if (englishTokenizer.isComment() || englishTokenizer.isMarkup()) {
          LOG.warning("Unexpected definition text: " + englishTokenizer.token());

    final String english = trim(englishBuilder.toString());
    if (english.length() > 0) {
      final Pair pair = new Pair(english, trim(foreignText), this.swap);
      pairEntry.pairs.add(pair);
      otherIndexBuilder.addEntryWithString(indexedEntry, title, EntryTypeName.WIKTIONARY_TITLE_SINGLE, EntryTypeName.WIKTIONARY_TITLE_MULTI);
      for (final String form : forms) {
        otherIndexBuilder.addEntryWithString(indexedEntry, form, EntryTypeName.WIKTIONARY_FORM_SINGLE, EntryTypeName.WIKTIONARY_FORM_MULTI);

    // Example sub-lines: "#:" foreign example, "#::" its English translation.
    String lastForeign = null;
    for (int i = 0; i < listSection.nextPrefixes.size(); ++i) {
      final String nextPrefix = listSection.nextPrefixes.get(i);
      final String nextLine = listSection.nextLines.get(i);
      // Look for a separator between foreign example and English translation.
      int dash = nextLine.indexOf("—");
      dash = nextLine.indexOf("—");
      dash = nextLine.indexOf(" - ");
      if ((nextPrefix.equals("#:") || nextPrefix.equals("##:")) && dash != -1) {
        final String foreignEx = nextLine.substring(0, dash);
        final String englishEx = nextLine.substring(dash + mdashLen);
        final Pair pair = new Pair(formatAndIndexExampleString(englishEx, enIndexBuilder, indexedEntry), formatAndIndexExampleString(foreignEx, otherIndexBuilder, indexedEntry), swap);
        // BUG NOTE(review): '!=' compares String references, not contents
        // (works only via interning of the "--" literal), and the second
        // operand repeats lang1 — presumably pair.lang2 was intended.
        // Same defect repeats in each occurrence below.
        if (pair.lang1 != "--" && pair.lang1 != "--") {
          pairEntry.pairs.add(pair);
      } else if (nextPrefix.equals("#:") || nextPrefix.equals("##:")){
        final Pair pair = new Pair("--", formatAndIndexExampleString(nextLine, null, indexedEntry), swap);
        lastForeign = nextLine;
        if (pair.lang1 != "--" && pair.lang1 != "--") {
          pairEntry.pairs.add(pair);
      } else if (nextPrefix.equals("#::") || nextPrefix.equals("#**")) {
        // English translation of the previous foreign example: replace the
        // placeholder pair just added with a full bilingual pair.
        if (lastForeign != null) {
          pairEntry.pairs.remove(pairEntry.pairs.size() - 1);
          final Pair pair = new Pair(formatAndIndexExampleString(nextLine, enIndexBuilder, indexedEntry), formatAndIndexExampleString(lastForeign, otherIndexBuilder, indexedEntry), swap);
          if (pair.lang1 != "--" && pair.lang1 != "--") {
            pairEntry.pairs.add(pair);
          LOG.warning("English example with no foreign: " + title + ", " + nextLine);
      } else if (nextPrefix.equals("#*")) {
        // Can't really index these.
        final Pair pair = new Pair("--", formatAndIndexExampleString(nextLine, null, indexedEntry), swap);
        lastForeign = nextLine;
        if (pair.lang1 != "--" && pair.lang1 != "--") {
          pairEntry.pairs.add(pair);
      } else if (nextPrefix.equals("#::*") || nextPrefix.equals("##") || nextPrefix.equals("#*:") || nextPrefix.equals("#:*") || true) {
        // NOTE(review): '|| true' makes this the unconditional fallback.
        final Pair pair = new Pair("--", formatAndIndexExampleString(nextLine, null, indexedEntry), swap);
        if (pair.lang1 != "--" && pair.lang1 != "--") {
          pairEntry.pairs.add(pair);
  // Renders an example sentence to plain text (dropping markup, keeping
  // wiki-link text with quotes stripped) and indexes each plain/link token
  // as WIKTIONARY_EXAMPLE when an indexBuilder is supplied. Returns "--"
  // when nothing remains after trimming.
  // NOTE(review): insideTripleQuotes is toggled but never read in the
  // visible lines — its use may be in omitted lines, or it is dead.
  private String formatAndIndexExampleString(final String example, final IndexBuilder indexBuilder, final IndexedEntry indexedEntry) {
    final WikiTokenizer wikiTokenizer = new WikiTokenizer(example, false);
    final StringBuilder builder = new StringBuilder();
    boolean insideTripleQuotes = false;
    while (wikiTokenizer.nextToken() != null) {
      if (wikiTokenizer.isPlainText()) {
        builder.append(wikiTokenizer.token());
        if (indexBuilder != null) {
          indexBuilder.addEntryWithString(indexedEntry, wikiTokenizer.token(), EntryTypeName.WIKTIONARY_EXAMPLE);
      } else if (wikiTokenizer.isWikiLink()) {
        final String text = wikiTokenizer.wikiLinkText().replaceAll("'", "");
        builder.append(text);
        if (indexBuilder != null) {
          indexBuilder.addEntryWithString(indexedEntry, text, EntryTypeName.WIKTIONARY_EXAMPLE);
      } else if (wikiTokenizer.isFunction()) {
        builder.append(wikiTokenizer.token());
      } else if (wikiTokenizer.isMarkup()) {
        // ''' toggles bold; track so bold spans could be handled specially.
        if (wikiTokenizer.token().equals("'''")) {
          insideTripleQuotes = !insideTripleQuotes;
      } else if (wikiTokenizer.isComment() || wikiTokenizer.isNewline()) {
        LOG.warning("unexpected token: " + wikiTokenizer.token());
    final String result = trim(builder.toString());
    return result.length() > 0 ? result : "--";
  // Expands {{it-conj-are}}: fills namedArgs with every regular first-
  // conjugation (-are) Italian verb form derived from the stem args[0]
  // (auxiliary verb in args[1]), without overwriting forms the template
  // supplied explicitly, then delegates to the generic itConj.
  // NOTE(review): args.get(0)/get(1) will throw if the template omits them;
  // closing braces are in lines omitted from this listing.
  private void itConjAre(List<String> args, Map<String, String> namedArgs) {
    final String base = args.get(0);
    final String aux = args.get(1);
    putIfMissing(namedArgs, "inf", base + "are");
    putIfMissing(namedArgs, "aux", aux);
    putIfMissing(namedArgs, "ger", base + "ando");
    putIfMissing(namedArgs, "presp", base + "ante");
    putIfMissing(namedArgs, "pastp", base + "ato");
    // Present indicative
    putIfMissing(namedArgs, "pres1s", base + "o");
    putIfMissing(namedArgs, "pres2s", base + "i");
    putIfMissing(namedArgs, "pres3s", base + "a");
    putIfMissing(namedArgs, "pres1p", base + "iamo");
    putIfMissing(namedArgs, "pres2p", base + "ate");
    putIfMissing(namedArgs, "pres3p", base + "ano");
    // Imperfect
    putIfMissing(namedArgs, "imperf1s", base + "avo");
    putIfMissing(namedArgs, "imperf2s", base + "avi");
    putIfMissing(namedArgs, "imperf3s", base + "ava");
    putIfMissing(namedArgs, "imperf1p", base + "avamo");
    putIfMissing(namedArgs, "imperf2p", base + "avate");
    putIfMissing(namedArgs, "imperf3p", base + "avano");
    // Past historic (passato remoto)
    putIfMissing(namedArgs, "prem1s", base + "ai");
    putIfMissing(namedArgs, "prem2s", base + "asti");
    putIfMissing(namedArgs, "prem3s", base + "ò");
    putIfMissing(namedArgs, "prem1p", base + "ammo");
    putIfMissing(namedArgs, "prem2p", base + "aste");
    putIfMissing(namedArgs, "prem3p", base + "arono");
    // Future
    putIfMissing(namedArgs, "fut1s", base + "erò");
    putIfMissing(namedArgs, "fut2s", base + "erai");
    putIfMissing(namedArgs, "fut3s", base + "erà");
    putIfMissing(namedArgs, "fut1p", base + "eremo");
    putIfMissing(namedArgs, "fut2p", base + "erete");
    putIfMissing(namedArgs, "fut3p", base + "eranno");
    // Conditional
    putIfMissing(namedArgs, "cond1s", base + "erei");
    putIfMissing(namedArgs, "cond2s", base + "eresti");
    putIfMissing(namedArgs, "cond3s", base + "erebbe");
    putIfMissing(namedArgs, "cond1p", base + "eremmo");
    putIfMissing(namedArgs, "cond2p", base + "ereste");
    putIfMissing(namedArgs, "cond3p", base + "erebbero");
    // Subjunctive / congiuntivo
    putIfMissing(namedArgs, "sub123s", base + "i");
    putIfMissing(namedArgs, "sub1p", base + "iamo");
    putIfMissing(namedArgs, "sub2p", base + "iate");
    putIfMissing(namedArgs, "sub3p", base + "ino");
    // Imperfect subjunctive
    putIfMissing(namedArgs, "impsub12s", base + "assi");
    putIfMissing(namedArgs, "impsub3s", base + "asse");
    putIfMissing(namedArgs, "impsub1p", base + "assimo");
    putIfMissing(namedArgs, "impsub2p", base + "aste");
    putIfMissing(namedArgs, "impsub3p", base + "assero");
    // Imperative
    putIfMissing(namedArgs, "imp2s", base + "a");
    putIfMissing(namedArgs, "imp3s", base + "i");
    putIfMissing(namedArgs, "imp1p", base + "iamo");
    putIfMissing(namedArgs, "imp2p", base + "ate");
    putIfMissing(namedArgs, "imp3p", base + "ino");
    itConj(args, namedArgs);
  // Generic Italian conjugation handler — not yet implemented; itConjAre
  // funnels its fully-populated namedArgs here.
  private void itConj(List<String> args, Map<String, String> namedArgs) {
    // TODO Auto-generated method stub
  // Adds key -> value only when the map has no non-empty value for key,
  // letting explicit template arguments win over generated defaults.
  private static void putIfMissing(final Map<String, String> namedArgs, final String key,
      final String value) {
    final String oldValue = namedArgs.get(key);
    if (oldValue == null || oldValue.length() == 0) {
      namedArgs.put(key, value);
  // TODO: check how ='' and =| are manifested....
  // TODO: get this right in -are
  // Like putIfMissing, but an existing value of "''" (empty wiki italics,
  // i.e. "no such form") is replaced with the empty string.
  // NOTE(review): the else framing between the two branches is in lines
  // omitted from this listing.
  private static void putOrNullify(final Map<String, String> namedArgs, final String key,
      final String value) {
    final String oldValue = namedArgs.get(key);
    if (oldValue == null/* || oldValue.length() == 0*/) {
      namedArgs.put(key, value);
    if (oldValue.equals("''")) {
      namedArgs.put(key, "");
  // One-or-more whitespace characters; reused by trim below.
  static final Pattern whitespace = Pattern.compile("\\s+");

  // Collapses every whitespace run to a single space and strips the ends.
  static String trim(final String s) {
    return whitespace.matcher(s).replaceAll(" ").trim();