1 // Copyright 2011 Google Inc. All Rights Reserved.
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
7 // http://www.apache.org/licenses/LICENSE-2.0
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
15 package com.hughes.android.dictionary.parser;
import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.logging.Logger;
import java.util.regex.Pattern;

import com.hughes.android.dictionary.engine.EntryTypeName;
import com.hughes.android.dictionary.engine.IndexBuilder;
import com.hughes.android.dictionary.engine.IndexedEntry;
import com.hughes.android.dictionary.engine.PairEntry;
import com.hughes.android.dictionary.engine.PairEntry.Pair;
39 public class EnWiktionaryXmlParser {
  // printf-style suffix used to append a transliteration to a foreign word,
  // e.g. "слово (tr. slovo)".
  private static final String TRANSLITERATION_FORMAT = " (tr. %s)";

  // Shared logger for the whole parser.
  static final Logger LOG = Logger.getLogger(EnWiktionaryXmlParser.class.getName());

  // TODO: process {{ttbc}} lines
47 static final Pattern partOfSpeechHeader = Pattern.compile(
48 "Noun|Verb|Adjective|Adverb|Pronoun|Conjunction|Interjection|" +
49 "Preposition|Proper noun|Article|Prepositional phrase|Acronym|" +
50 "Abbreviation|Initialism|Contraction|Prefix|Suffix|Symbol|Letter|" +
51 "Ligature|Idiom|Phrase|\\{\\{acronym\\}\\}|\\{\\{initialism\\}\\}|" +
52 // These are @deprecated:
53 "Noun form|Verb form|Adjective form|Nominal phrase|Noun phrase|" +
54 "Verb phrase|Transitive verb|Intransitive verb|Reflexive verb|" +
55 // These are extras I found:
56 "Determiner|Numeral|Number|Cardinal number|Ordinal number|Proverb|" +
57 "Particle|Interjection|Pronominal adverb" +
58 "Han character|Hanzi|Hanja|Kanji|Katakana character|Syllable");
  // Index receiving English-side tokens.
  final IndexBuilder enIndexBuilder;
  // Index receiving foreign-language tokens.
  final IndexBuilder foreignIndexBuilder;
  // Matches headings / language names of the target foreign language.
  final Pattern langPattern;
  // Matches the wiki language codes of the target language.
  final Pattern langCodePattern;
  /**
   * @param enIndexBuilder    receives English-side index entries.
   * @param otherIndexBuilder receives foreign-language index entries.
   * @param langPattern       matches headings/names of the target language.
   * @param langCodePattern   matches the target language's wiki codes.
   * @param swap              pair-direction flag; presumably stored in a
   *                          {@code swap} field used when building Pairs —
   *                          TODO(review) confirm, assignment not shown here.
   */
  public EnWiktionaryXmlParser(final IndexBuilder enIndexBuilder, final IndexBuilder otherIndexBuilder, final Pattern langPattern, final Pattern langCodePattern, final boolean swap) {
    this.enIndexBuilder = enIndexBuilder;
    this.foreignIndexBuilder = otherIndexBuilder;
    this.langPattern = langPattern;
    this.langCodePattern = langCodePattern;
  /**
   * Reads a pre-digested dump file: a sequence of records, each a page title
   * (UTF), a section heading (UTF), and the section's wikitext bytes.
   * Each record is routed to {@link #parseSection}.  Stops after
   * {@code pageLimit} pages when {@code pageLimit >= 0}.
   */
  public void parse(final File file, final int pageLimit) throws IOException {
    final DataInputStream dis = new DataInputStream(new BufferedInputStream(new FileInputStream(file)));
    // Honor the page limit, if one was requested.
    if (pageLimit >= 0 && pageCount >= pageLimit) {
    title = dis.readUTF();
    // Hitting EOF on the title read simply means the dump is exhausted.
    } catch (EOFException e) {
    final String heading = dis.readUTF();
    final int bytesLength = dis.readInt();
    final byte[] bytes = new byte[bytesLength];
    // "UTF8" is an accepted alias of UTF-8 for this constructor.
    final String text = new String(bytes, "UTF8");
    parseSection(title, heading, text);
    // Progress logging every 1000 pages.
    if (pageCount % 1000 == 0) {
      LOG.info("pageCount=" + pageCount);
  /**
   * Handles one (title, heading, text) record: skips non-entry namespaces,
   * then routes ==English== sections to doEnglishWord and sections whose
   * heading matches langPattern to doForeignWord.
   */
  private void parseSection(final String title, String heading, final String text) {
    // Ignore meta/project namespaces; only real dictionary entries matter.
    if (title.startsWith("Wiktionary:") ||
        title.startsWith("Template:") ||
        title.startsWith("Appendix:") ||
        title.startsWith("Category:") ||
        title.startsWith("Index:") ||
        title.startsWith("MediaWiki:") ||
        title.startsWith("TransWiki:") ||
        title.startsWith("Citations:") ||
        title.startsWith("Concordance:") ||
        title.startsWith("Help:")) {
    // Headings arrive as "==English=="; strip the '='s to the bare name.
    heading = heading.replaceAll("=", "").trim();
    if (heading.equals("English")) {
      doEnglishWord(title, text);
    } else if (langPattern.matcher(heading).find()){
      doForeignWord(heading, title, text);
128 // -------------------------------------------------------------------------
  /**
   * Walks an ==English== section, remembering the most recent part-of-speech
   * heading and handing each ===Translations=== subsection to doTranslations.
   * (pos/posDepth are tracked in locals declared outside this view —
   * TODO(review) confirm.)
   */
  private void doEnglishWord(String title, String text) {
    final WikiTokenizer wikiTokenizer = new WikiTokenizer(text);
    while (wikiTokenizer.nextToken() != null) {
      if (wikiTokenizer.isHeading()) {
        final String headerName = wikiTokenizer.headingWikiText();
        // Left the current POS subtree: the remembered POS no longer applies.
        if (wikiTokenizer.headingDepth() <= posDepth) {
        if (partOfSpeechHeader.matcher(headerName).matches()) {
          posDepth = wikiTokenizer.headingDepth();
          pos = wikiTokenizer.headingWikiText();
          // TODO: if we're inside the POS section, we should handle the first title line...
        } else if (headerName.equals("Translations")) {
          // Translations with no preceding POS heading is suspicious; log it.
          LOG.warning("Translations without POS: " + title);
          doTranslations(title, wikiTokenizer, pos);
        } else if (headerName.equals("Pronunciation")) {
          //doPronunciation(wikiLineReader);
164 private static Set<String> encodings = new LinkedHashSet<String>(Arrays.asList("zh-ts",
165 "sd-Arab", "ku-Arab", "Arab", "unicode", "Laoo", "ur-Arab", "Thai",
166 "fa-Arab", "Khmr", "zh-tsp", "Cyrl", "IPAchar", "ug-Arab", "ko-inline",
167 "Jpan", "Kore", "Hebr", "rfscript", "Beng", "Mong", "Knda", "Cyrs",
168 "yue-tsj", "Mlym", "Tfng", "Grek", "yue-yue-j"));
  /**
   * Consumes the body of a Translations section, producing one dictionary
   * pair per per-language list item via doTranslationLine.  The current
   * gloss ("sense") comes from {{trans-top|...}}; it and some loop state are
   * declared outside this view — TODO(review) confirm.
   */
  private void doTranslations(final String title, final WikiTokenizer wikiTokenizer, final String pos) {
    if (title.equals("absolutely")) {  // leftover debugging hook
      //System.out.println();
    // Language of the enclosing one-star item, for two-level nested lists.
    String topLevelLang = null;
    boolean done = false;
    while (wikiTokenizer.nextToken() != null) {
      // A heading ends the Translations section; rewind so the caller sees it.
      if (wikiTokenizer.isHeading()) {
        wikiTokenizer.returnToLineStart();
      // Check whether we care about this line:
      if (wikiTokenizer.isFunction()) {
        final String functionName = wikiTokenizer.functionName();
        final List<String> positionArgs = wikiTokenizer.functionPositionArgs();
        if (functionName.equals("trans-top")) {
          // {{trans-top|gloss}} opens a translation table; arg 0 is the sense.
          if (wikiTokenizer.functionPositionArgs().size() >= 1) {
            sense = positionArgs.get(0);
            // TODO: could emphasize words in [[brackets]] inside sense.
            sense = WikiTokenizer.toPlainText(sense);
            //LOG.info("Sense: " + sense);
        } else if (functionName.equals("trans-bottom")) {
        } else if (functionName.equals("trans-mid")) {
        } else if (functionName.equals("trans-see")) {
          // TODO: would also be nice...
        } else if (functionName.startsWith("picdic")) {
        } else if (functionName.startsWith("checktrans")) {
        } else if (functionName.startsWith("ttbc")) {
          wikiTokenizer.nextLine();
          // TODO: would be great to handle ttbc
          // TODO: Check this: done = true;
          LOG.warning("Unexpected translation wikifunction: " + wikiTokenizer.token() + ", title=" + title);
      } else if (wikiTokenizer.isListItem()) {
        final String line = wikiTokenizer.listItemWikiText();
        // This line could produce an output...
        if (line.contains("ich hoan dich gear")) {  // leftover debugging hook
          //System.out.println();
        // First strip the language and check whether it matches.
        // And hold onto it for sub-lines.
        final int colonIndex = line.indexOf(":");
        if (colonIndex == -1) {
        final String lang = trim(WikiTokenizer.toPlainText(line.substring(0, colonIndex)));
        // appendLang: prefix the output with "(lang)" when the item's
        // language is not exactly the target language (dialects etc.).
        final boolean appendLang;
        if (wikiTokenizer.listItemPrefix().length() == 1) {
          final boolean thisFind = langPattern.matcher(lang).find();
          appendLang = !langPattern.matcher(lang).matches();
        } else if (topLevelLang == null) {
          // Two-level -- the only way we won't append is if this second level matches exactly.
          if (!langPattern.matcher(lang).matches() && !langPattern.matcher(topLevelLang).find()) {
          appendLang = !langPattern.matcher(lang).matches();
        String rest = line.substring(colonIndex + 1).trim();
        if (rest.length() > 0) {
          doTranslationLine(line, appendLang ? lang : null, title, pos, sense, rest);
      } else if (wikiTokenizer.remainderStartsWith("''See''")) {
        wikiTokenizer.nextLine();
        LOG.fine("Skipping See line: " + wikiTokenizer.token());
      } else if (wikiTokenizer.isWikiLink()) {
        final String wikiLink = wikiTokenizer.wikiLinkText();
        if (wikiLink.contains(":") && wikiLink.contains(title)) {
        } else if (wikiLink.contains("Category:")) {
          LOG.warning("Unexpected wikiLink: " + wikiTokenizer.token() + ", title=" + title);
      } else if (wikiTokenizer.isNewline() || wikiTokenizer.isMarkup() || wikiTokenizer.isComment()) {
        final String token = wikiTokenizer.token();
        // "----" separates language sections; it also ends the translations.
        if (token.equals("----")) {
          LOG.warning("Unexpected translation token: " + wikiTokenizer.token() + ", title=" + title);
277 private static <T> T get(final List<T> list, final int index, final T defaultValue) {
278 return index < list.size() ? list.get(index) : defaultValue;
281 private static <T> T get(final List<T> list, final int index) {
282 return get(list, index, null);
285 private static <T> T remove(final List<T> list, final int index, final T defaultValue) {
286 return index < list.size() ? list.remove(index) : defaultValue;
  /**
   * WikiTokenizer callback that renders tokens into a StringBuilder while
   * simultaneously indexing the rendered text under indexedEntry.
   * Known template names can be dispatched to per-name handlers via
   * functionCallbacks; all other templates get generic plain-text rendering.
   */
  static final class AppendAndIndexCallback implements WikiTokenizer.Callback {
    public AppendAndIndexCallback(
        final StringBuilder builder,
        final IndexedEntry indexedEntry,
        final IndexBuilder defaultIndexBuilder,
        final Map<String, WikiFunctionCallback> functionCallbacks) {
      this.indexedEntry = indexedEntry;
      this.defaultIndexBuilder = defaultIndexBuilder;
      this.builder = builder;
      this.functionCallbacks = functionCallbacks;
    // Output accumulator for the rendered text.
    final StringBuilder builder;
    // Dictionary entry that indexed tokens are attached to.
    final IndexedEntry indexedEntry;
    // Index used when no more specific handler applies.
    IndexBuilder defaultIndexBuilder;
    // Per-template-name handlers; see onFunction.
    final Map<String,WikiFunctionCallback> functionCallbacks;
    // TODO: the classes of text are wrong....
    public void onPlainText(WikiTokenizer wikiTokenizer) {
      // The only non-recursive callback. Just appends to the builder, and
      final String plainText = wikiTokenizer.token();
      builder.append(plainText);
      defaultIndexBuilder.addEntryWithString(indexedEntry, plainText, EntryTypeName.WIKTIONARY_TRANSLATION_OTHER_TEXT);
    public void onWikiLink(WikiTokenizer wikiTokenizer) {
      final String plainText = wikiTokenizer.wikiLinkText();
      builder.append(plainText);
      // TODO: should check for English before appending.
      defaultIndexBuilder.addEntryWithString(indexedEntry, plainText, EntryTypeName.WIKTIONARY_TRANSLATION_WIKI_TEXT);
    public void onFunction(final String name,
        final List<String> args, final Map<String, String> namedArgs) {
      final WikiFunctionCallback functionCallback = functionCallbacks.get(name);
      if (functionCallback != null) {
        // Dispatch the handling elsewhere.
        functionCallback.onWikiFunction(name, args, namedArgs);
        // Default function handling:
        // Flatten every argument to plain text, then re-emit the template.
        for (int i = 0; i < args.size(); ++i) {
          args.set(i, WikiTokenizer.toPlainText(args.get(i)));
        for (final Map.Entry<String, String> entry : namedArgs.entrySet()) {
          entry.setValue(WikiTokenizer.toPlainText(entry.getValue()));
        WikiTokenizer.appendFunction(builder, name, args, namedArgs);
    // Remaining token kinds are intentionally ignored by this callback.
    public void onMarkup(WikiTokenizer wikiTokenizer) {
    public void onComment(WikiTokenizer wikiTokenizer) {
    public void onNewline(WikiTokenizer wikiTokenizer) {
    public void onHeading(WikiTokenizer wikiTokenizer) {
    public void onListItem(WikiTokenizer wikiTokenizer) {
  /**
   * Renders one per-language translation list item ("* Italian: {{t|it|...}}")
   * into a PairEntry: the foreign side is built token-by-token from
   * {@code rest}, the English side from title/sense/pos.
   *
   * @param lang  non-null when "(lang) " should prefix the foreign text.
   */
  private void doTranslationLine(final String line, final String lang, final String title, final String pos, final String sense, final String rest) {
    // Good chance we'll actually file this one...
    final PairEntry pairEntry = new PairEntry();
    final IndexedEntry indexedEntry = new IndexedEntry(pairEntry);
    final StringBuilder foreignText = new StringBuilder();
    final WikiTokenizer wikiTokenizer = new WikiTokenizer(rest, false);
    while (wikiTokenizer.nextToken() != null) {
      if (wikiTokenizer.isPlainText()) {
        final String plainText = wikiTokenizer.token();
        foreignText.append(plainText);
        foreignIndexBuilder.addEntryWithString(indexedEntry, plainText, EntryTypeName.WIKTIONARY_TRANSLATION_OTHER_TEXT);
      } else if (wikiTokenizer.isWikiLink()) {
        final String plainText = wikiTokenizer.wikiLinkText();
        foreignText.append(plainText);
        // TODO: should check for English before appending.
        foreignIndexBuilder.addEntryWithString(indexedEntry, plainText, EntryTypeName.WIKTIONARY_TRANSLATION_WIKI_TEXT);
      } else if (wikiTokenizer.isFunction()) {
        final String functionName = wikiTokenizer.functionName();
        final List<String> args = wikiTokenizer.functionPositionArgs();
        final Map<String,String> namedArgs = wikiTokenizer.functionNamedArgs();
        // {{t}} family: args are (langCode, word[, gender]), tr= transliteration.
        if (functionName.equals("t") || functionName.equals("t+") || functionName.equals("t-") || functionName.equals("tø") || functionName.equals("apdx-t")) {
          if (args.size() < 2) {
            LOG.warning("{{t}} with too few args: " + line + ", title=" + title);
          final String langCode = get(args, 0);
          final String word = get(args, 1);
          final String gender = get(args, 2);
          final String transliteration = namedArgs.get("tr");
          if (foreignText.length() > 0) {
            // NOTE(review): append("") is a no-op — presumably a separator
            // such as ", " was intended here; confirm before changing.
            foreignText.append("");
          foreignText.append(word);
          foreignIndexBuilder.addEntryWithString(indexedEntry, word, EntryTypeName.WIKTIONARY_TITLE_SINGLE, EntryTypeName.WIKTIONARY_TITLE_MULTI);
          if (gender != null) {
            foreignText.append(String.format(" {%s}", gender));
          if (transliteration != null) {
            foreignText.append(String.format(TRANSLITERATION_FORMAT, transliteration));
            foreignIndexBuilder.addEntryWithString(indexedEntry, transliteration, EntryTypeName.WIKTIONARY_TRANSLITERATION);
        } else if (functionName.equals("qualifier")) {
          if (args.size() == 0) {
            foreignText.append(wikiTokenizer.token());
            String qualifier = args.get(0);
            if (!namedArgs.isEmpty() || args.size() > 1) {
              LOG.warning("weird qualifier: " + line);
            foreignText.append("(").append(qualifier).append(")");
        } else if (encodings.contains(functionName)) {
          // Script template: arg 0 is raw foreign-script text.
          foreignText.append("").append(args.get(0));
          foreignIndexBuilder.addEntryWithString(indexedEntry, args.get(0), EntryTypeName.WIKTIONARY_TRANSLATION_OTHER_TEXT);
        } else if (isGender(functionName)) {
          appendGender(foreignText, functionName, args);
        } else if (functionName.equals("g")) {
          foreignText.append("{g}");
        } else if (functionName.equals("l")) {
          // encodes text in various langs.
          foreignText.append("").append(args.get(1));
          foreignIndexBuilder.addEntryWithString(indexedEntry, args.get(1), EntryTypeName.WIKTIONARY_TRANSLATION_OTHER_TEXT);
          // TODO: transliteration
        } else if (functionName.equals("term")) {
          // cross-reference to another dictionary
          foreignText.append("").append(args.get(0));
          foreignIndexBuilder.addEntryWithString(indexedEntry, args.get(0), EntryTypeName.WIKTIONARY_TRANSLATION_OTHER_TEXT);
          // TODO: transliteration
        } else if (functionName.equals("italbrac") || functionName.equals("gloss")) {
          // TODO: put this text aside to use it.
          foreignText.append("[").append(args.get(0)).append("]");
          foreignIndexBuilder.addEntryWithString(indexedEntry, args.get(0), EntryTypeName.WIKTIONARY_TRANSLATION_OTHER_TEXT);
        } else if (functionName.equals("ttbc")) {
          LOG.warning("Unexpected {{ttbc}}");
        } else if (functionName.equals("trreq")) {
        } else if (functionName.equals("not used")) {
          foreignText.append("(not used)");
        } else if (functionName.equals("t-image")) {
          // American sign language
          // Unknown template: strip noise args, re-emit it literally.
          namedArgs.keySet().removeAll(USELESS_WIKI_ARGS);
          WikiTokenizer.appendFunction(foreignText.append("{{"), functionName, args, namedArgs).append("}}");
      } else if (wikiTokenizer.isNewline()) {
      } else if (wikiTokenizer.isComment()) {
      } else if (wikiTokenizer.isMarkup()) {
        LOG.warning("Bad translation token: " + wikiTokenizer.token());
    if (foreignText.length() == 0) {
      LOG.warning("Empty foreignText: " + line);
    // Prefix "(lang) " when requested by the caller.
    foreignText.insert(0, String.format("(%s) ", lang));
    StringBuilder englishText = new StringBuilder();
    englishText.append(title);
    englishText.append(" (").append(sense).append(")");
    // NOTE(review): the same enum constant is passed twice here — probably
    // one of them was meant to differ; confirm against addEntryWithString.
    enIndexBuilder.addEntryWithString(indexedEntry, sense, EntryTypeName.WIKTIONARY_TRANSLATION_SENSE, EntryTypeName.WIKTIONARY_TRANSLATION_SENSE);
    englishText.append(" (").append(pos.toLowerCase()).append(")");
    enIndexBuilder.addEntryWithString(indexedEntry, title, EntryTypeName.WIKTIONARY_TITLE_SINGLE, EntryTypeName.WIKTIONARY_TITLE_MULTI);
    final Pair pair = new Pair(trim(englishText.toString()), trim(foreignText.toString()), swap);
    pairEntry.pairs.add(pair);
    if (!pairsAdded.add(pair.toString())) {
      LOG.warning("Duplicate pair: " + pair.toString());
    if (pair.toString().equals("libero {m} :: free (adjective)")) {
      System.out.println();  // leftover debugging hook
502 private void appendGender(final StringBuilder otherText,
503 final String functionName, final List<String> args) {
504 otherText.append("{");
505 otherText.append(functionName);
506 for (int i = 0; i < args.size(); ++i) {
507 otherText.append("|").append(args.get(i));
509 otherText.append("}");
513 private boolean isGender(final String functionName) {
514 return functionName.equals("m") || functionName.equals("f") || functionName.equals("n") || functionName.equals("p");
517 Set<String> pairsAdded = new LinkedHashSet<String>();
519 // -------------------------------------------------------------------------
  /**
   * Walks a foreign-language section of a page, handing each part-of-speech
   * subsection to doForeignPartOfSpeech.
   */
  private void doForeignWord(final String lang, final String title, final String text) {
    final WikiTokenizer wikiTokenizer = new WikiTokenizer(text);
    while (wikiTokenizer.nextToken() != null) {
      if (wikiTokenizer.isHeading()) {
        final String headingName = wikiTokenizer.headingWikiText();
        if (headingName.equals("Translations")) {
          // Translations belong under ==English==; seeing one here is a bug
          // in the page, so just log it.
          LOG.warning("Translations not in English section: " + title);
        } else if (headingName.equals("Pronunciation")) {
          //doPronunciation(wikiLineReader);
        } else if (partOfSpeechHeader.matcher(headingName).matches()) {
          doForeignPartOfSpeech(lang, title, headingName, wikiTokenizer.headingDepth(), wikiTokenizer);
538 static final class ListSection {
539 final String firstPrefix;
540 final String firstLine;
541 final List<String> nextPrefixes = new ArrayList<String>();
542 final List<String> nextLines = new ArrayList<String>();
544 public ListSection(String firstPrefix, String firstLine) {
545 this.firstPrefix = firstPrefix;
546 this.firstLine = firstLine;
550 public String toString() {
551 return firstPrefix + firstLine + "{ " + nextPrefixes + "}";
556 int foreignCount = 0;
  /**
   * Processes one part-of-speech section of a foreign word: builds the
   * foreign-side text (headword templates, genders, inflections) in
   * foreignBuilder, collects the definition list items into listSections,
   * and finally emits one entry per list item via doForeignListItem.
   */
  private void doForeignPartOfSpeech(final String lang, String title, final String posHeading, final int posDepth, WikiTokenizer wikiTokenizer) {
    if (++foreignCount % 1000 == 0) {
      LOG.info("***" + lang + ", " + title + ", pos=" + posHeading + ", foreignCount=" + foreignCount);
    if (title.equals("moro")) {  // leftover debugging hook
      System.out.println();
    // True once a headword template has rendered the title into the output.
    boolean titleAppended = false;
    final StringBuilder foreignBuilder = new StringBuilder();
    // Inflected forms to index this entry under, in addition to the title.
    final Collection<String> wordForms = new ArrayList<String>();
    final List<ListSection> listSections = new ArrayList<ListSection>();
    ListSection lastListSection = null;
    int currentHeadingDepth = posDepth;
    while (wikiTokenizer.nextToken() != null) {
      if (wikiTokenizer.isHeading()) {
        currentHeadingDepth = wikiTokenizer.headingDepth();
        // A heading at or above our level ends this POS section.
        if (currentHeadingDepth <= posDepth) {
          wikiTokenizer.returnToLineStart();
      // Inside a deeper sub-heading: skip its content.
      if (currentHeadingDepth > posDepth) {
        // TODO: deal with other neat info sections
      if (wikiTokenizer.isFunction()) {
        final String name = wikiTokenizer.functionName();
        final List<String> args = wikiTokenizer.functionPositionArgs();
        final Map<String,String> namedArgs = wikiTokenizer.functionNamedArgs();
        // First line is generally a repeat of the title with some extra information.
        // We need to build up the left side (foreign text, tokens) separately from the
        // right side (English). The left-side may get paired with multiple right sides.
        // The left side should get filed under every form of the word in question (singular, plural).
        // For verbs, the conjugation comes later on in a deeper section.
        // Ideally, we'd want to file every English entry with the verb
        // under every verb form coming from the conjugation.
        // Ie. under "fa": see: "make :: fare" and "do :: fare"
        // But then where should we put the conjugation table?
        // I think just under fare. But then we need a way to link to the entry (actually the row, since entries doesn't show up!)
        // for the conjugation table from "fa".
        // Would like to be able to link to a lang#token.
        if (isGender(name)) {
          appendGender(foreignBuilder, name, args);
        } else if (name.equals("wikipedia")) {
          namedArgs.remove("lang");
          if (args.size() > 1 || !namedArgs.isEmpty()) {
            foreignBuilder.append(wikiTokenizer.token());
          } else if (args.size() == 1) {
            foreignBuilder.append(wikiTokenizer.token());
            //foreignBuilder.append(title);
        } else if (name.equals("attention") || name.equals("zh-attention")) {
          // See: http://en.wiktionary.org/wiki/Template:attention
        } else if (name.equals("infl") || name.equals("head")) {
          // See: http://en.wiktionary.org/wiki/Template:infl
          final String langCode = get(args, 0);
          String head = namedArgs.remove("head");
          head = namedArgs.remove("title"); // Bug
          head = WikiTokenizer.toPlainText(head);
          titleAppended = true;
          namedArgs.keySet().removeAll(USELESS_WIKI_ARGS);
          final String tr = namedArgs.remove("tr");
          String g = namedArgs.remove("g");
          g = namedArgs.remove("gender");
          final String g2 = namedArgs.remove("g2");
          final String g3 = namedArgs.remove("g3");
          foreignBuilder.append(head);
          // Render up to three genders as " {g|g2|g3}".
          foreignBuilder.append(" {").append(g);
          foreignBuilder.append("|").append(g2);
          foreignBuilder.append("|").append(g3);
          foreignBuilder.append("}");
          foreignBuilder.append(String.format(TRANSLITERATION_FORMAT, tr));
          final String pos = get(args, 1);
          foreignBuilder.append(" (").append(pos).append(")");
          // Remaining positional args come in (name, value) pairs of
          // inflections, e.g. "plural", "words".
          for (int i = 2; i < args.size(); i += 2) {
            final String inflName = get(args, i);
            final String inflValue = get(args, i + 1);
            foreignBuilder.append(", ").append(WikiTokenizer.toPlainText(inflName));
            if (inflValue != null && inflValue.length() > 0) {
              foreignBuilder.append(": ").append(WikiTokenizer.toPlainText(inflValue));
              wordForms.add(inflValue);
          for (final String key : namedArgs.keySet()) {
            final String value = WikiTokenizer.toPlainText(namedArgs.get(key));
            foreignBuilder.append(" ").append(key).append("=").append(value);
            wordForms.add(value);
        } else if (name.equals("it-noun")) {
          titleAppended = true;
          final String base = get(args, 0);
          final String gender = get(args, 1);
          final String singular = base + get(args, 2);
          final String plural = base + get(args, 3);
          // NOTE(review): the format has three %s but four args — the final
          // "plural" is ignored by String.format; harmless but redundant.
          foreignBuilder.append(String.format(" %s {%s}, %s {pl}", singular, gender, plural, plural));
          wordForms.add(singular);
          wordForms.add(plural);
          if (!namedArgs.isEmpty() || args.size() > 4) {
            LOG.warning("Invalid it-noun: " + wikiTokenizer.token());
        } else if (name.equals("it-proper noun")) {
          foreignBuilder.append(wikiTokenizer.token());
        } else if (name.equals("it-adj")) {
          foreignBuilder.append(wikiTokenizer.token());
        } else if (name.startsWith("it-conj")) {
          if (name.equals("it-conj-are")) {
            itConjAre(args, namedArgs);
          } else if (name.equals("it-conj-ere")) {
          } else if (name.equals("it-conj-ire")) {
            LOG.warning("Unknown conjugation: " + wikiTokenizer.token());
          // Unknown template: emit it literally.
          foreignBuilder.append(wikiTokenizer.token());
          // LOG.warning("Unknown function: " + wikiTokenizer.token());
      } else if (wikiTokenizer.isListItem()) {
        final String prefix = wikiTokenizer.listItemPrefix();
        // A strictly deeper prefix continues the current list section;
        // otherwise a new section starts.
        if (lastListSection != null &&
            prefix.startsWith(lastListSection.firstPrefix) &&
            prefix.length() > lastListSection.firstPrefix.length()) {
          lastListSection.nextPrefixes.add(prefix);
          lastListSection.nextLines.add(wikiTokenizer.listItemWikiText());
          lastListSection = new ListSection(prefix, wikiTokenizer.listItemWikiText());
          listSections.add(lastListSection);
      } else if (lastListSection != null) {
        // Don't append anything after the lists, because there's crap.
      } else if (wikiTokenizer.isWikiLink()) {
        foreignBuilder.append(wikiTokenizer.wikiLinkText());
      } else if (wikiTokenizer.isPlainText()) {
        foreignBuilder.append(wikiTokenizer.token());
      } else if (wikiTokenizer.isMarkup() || wikiTokenizer.isNewline() || wikiTokenizer.isComment()) {
        LOG.warning("Unexpected token: " + wikiTokenizer.token());
    // Here's where we exit.
    // Should we make an entry even if there are no foreign list items?
    String foreign = foreignBuilder.toString().trim();
    // Ensure the headword itself leads the foreign text.
    if (!titleAppended && !foreign.toLowerCase().startsWith(title.toLowerCase())) {
      foreign = String.format("%s %s", title, foreign);
    // Non-target dialects/languages get a "(lang)" prefix.
    if (!langPattern.matcher(lang).matches()) {
      foreign = String.format("(%s) %s", lang, foreign);
    for (final ListSection listSection : listSections) {
      doForeignListItem(foreign, title, wordForms, listSection);
  // Wiki text matching this pattern is grammatical boilerplate ("third-person
  // singular" etc.) that should not be indexed as a definition word.
  static final Pattern UNINDEXED_WIKI_TEXT = Pattern.compile(
      "(first|second|third)-person (singular|plural)|" +
  // Might only want to remove "lang" if it's equal to "zh", for example.
  // Template arguments that carry no user-visible content and are stripped
  // before re-emitting unknown templates.
  static final Set<String> USELESS_WIKI_ARGS = new LinkedHashSet<String>(Arrays.asList("lang", "sc", "sort", "cat"));
  /**
   * Turns one definition ListSection into a PairEntry: the first line becomes
   * the English gloss paired with {@code foreignText}, and nested example
   * lines become additional "--"-padded example pairs.  Indexes the title and
   * every collected inflected form under the entry.
   */
  private void doForeignListItem(final String foreignText, String title, final Collection<String> forms, final ListSection listSection) {
    final String prefix = listSection.firstPrefix;
    if (prefix.length() > 1) {
      // Could just get looser and say that any prefix longer than first is a sublist.
      LOG.warning("Prefix too long: " + listSection);
    final PairEntry pairEntry = new PairEntry();
    final IndexedEntry indexedEntry = new IndexedEntry(pairEntry);
    final StringBuilder englishBuilder = new StringBuilder();
    final String mainLine = listSection.firstLine;
    final WikiTokenizer englishTokenizer = new WikiTokenizer(mainLine, false);
    while (englishTokenizer.nextToken() != null) {
      // TODO handle form of....
      if (englishTokenizer.isPlainText()) {
        englishBuilder.append(englishTokenizer.token());
        enIndexBuilder.addEntryWithString(indexedEntry, englishTokenizer.token(), EntryTypeName.WIKTIONARY_ENGLISH_DEF);
      } else if (englishTokenizer.isWikiLink()) {
        final String text = englishTokenizer.wikiLinkText();
        final String link = englishTokenizer.wikiLinkDest();
        // Classify the link target to pick which index gets the text.
        if (link.contains("#English")) {
          englishBuilder.append(text);
          enIndexBuilder.addEntryWithString(indexedEntry, text, EntryTypeName.WIKTIONARY_ENGLISH_DEF_WIKI_LINK);
        } else if (link.contains("#") && this.langPattern.matcher(link).find()) {
          englishBuilder.append(text);
          foreignIndexBuilder.addEntryWithString(indexedEntry, text, EntryTypeName.WIKTIONARY_ENGLISH_DEF_OTHER_LANG);
        } else if (link.equals("plural")) {
          englishBuilder.append(text);
          //LOG.warning("Special link: " + englishTokenizer.token());
          enIndexBuilder.addEntryWithString(indexedEntry, text, EntryTypeName.WIKTIONARY_ENGLISH_DEF_WIKI_LINK);
          englishBuilder.append(text);
          englishBuilder.append(text);
          if (!UNINDEXED_WIKI_TEXT.matcher(text).find()) {
            enIndexBuilder.addEntryWithString(indexedEntry, text, EntryTypeName.WIKTIONARY_ENGLISH_DEF_WIKI_LINK);
      } else if (englishTokenizer.isFunction()) {
        final String name = englishTokenizer.functionName();
        final List<String> args = englishTokenizer.functionPositionArgs();
        final Map<String,String> namedArgs = englishTokenizer.functionNamedArgs();
        // "form of"-style templates: render as "{formName|...}" and index the
        // base form on the foreign side.
        name.equals("form of") ||
            name.contains("conjugation of") ||
            name.contains("participle of") ||
            name.contains("gerund of") ||
            name.contains("feminine of") ||
            name.contains("plural of")) {
          String formName = name;
          if (name.equals("form of")) {
            formName = remove(args, 0, null);
          if (formName == null) {
            LOG.warning("Missing form name: " + title);
            formName = "form of";
          String baseForm = get(args, 1, "");
          if ("".equals(baseForm)) {
            baseForm = get(args, 0, null);
            remove(args, 0, null);
          namedArgs.keySet().removeAll(USELESS_WIKI_ARGS);
          WikiTokenizer.appendFunction(englishBuilder.append("{"), formName, args, namedArgs).append("}");
          if (baseForm != null) {
            foreignIndexBuilder.addEntryWithString(indexedEntry, baseForm, EntryTypeName.WIKTIONARY_BASE_FORM_SINGLE, EntryTypeName.WIKTIONARY_BASE_FORM_MULTI);
            // null baseForm happens in Danish.
            LOG.warning("Null baseform: " + title);
        } else if (name.equals("l")) {
          // encodes text in various langs.
          englishBuilder.append("").append(args.get(1));
          final String langCode = args.get(0);
          if ("en".equals(langCode)) {
            enIndexBuilder.addEntryWithString(indexedEntry, args.get(1), EntryTypeName.WIKTIONARY_ENGLISH_DEF_WIKI_LINK);
            foreignIndexBuilder.addEntryWithString(indexedEntry, args.get(1), EntryTypeName.WIKTIONARY_ENGLISH_DEF_OTHER_LANG);
          // TODO: transliteration
        } else if (name.equals("defn") || name.equals("rfdef")) {
          // http://en.wiktionary.org/wiki/Wiktionary:Requests_for_deletion/Others#Template:defn
          // Redundant, used for the same purpose as {{rfdef}}, but this
          // doesn't produce the "This word needs a definition" text.
          // Delete or redirect.
          namedArgs.keySet().removeAll(USELESS_WIKI_ARGS);
          if (args.size() == 0 && namedArgs.isEmpty()) {
            englishBuilder.append("{").append(name).append("}");
            WikiTokenizer.appendFunction(englishBuilder.append("{{"), name, args, namedArgs).append("}}");
          // LOG.warning("Unexpected function: " + englishTokenizer.token());
        if (englishTokenizer.isComment() || englishTokenizer.isMarkup()) {
          LOG.warning("Unexpected definition type: " + englishTokenizer.token());
    final String english = trim(englishBuilder.toString());
    if (english.length() > 0) {
      final Pair pair = new Pair(english, trim(foreignText), this.swap);
      pairEntry.pairs.add(pair);
      foreignIndexBuilder.addEntryWithString(indexedEntry, title, EntryTypeName.WIKTIONARY_TITLE_SINGLE, EntryTypeName.WIKTIONARY_TITLE_MULTI);
      for (final String form : forms) {
        foreignIndexBuilder.addEntryWithString(indexedEntry, form, EntryTypeName.WIKTIONARY_INFLECTD_FORM_SINGLE, EntryTypeName.WIKTIONARY_INFLECTED_FORM_MULTI);
    // Example/quotation sub-lines: try to split "foreign — english" pairs.
    String lastForeign = null;
    for (int i = 0; i < listSection.nextPrefixes.size(); ++i) {
      final String nextPrefix = listSection.nextPrefixes.get(i);
      final String nextLine = listSection.nextLines.get(i);
      // Look for an em-dash (or " - ") separating foreign from English.
      int dash = nextLine.indexOf("—");
      dash = nextLine.indexOf("—");
      dash = nextLine.indexOf(" - ");
      if ((nextPrefix.equals("#:") || nextPrefix.equals("##:")) && dash != -1) {
        final String foreignEx = nextLine.substring(0, dash);
        final String englishEx = nextLine.substring(dash + mdashLen);
        final Pair pair = new Pair(formatAndIndexExampleString(englishEx, enIndexBuilder, indexedEntry), formatAndIndexExampleString(foreignEx, foreignIndexBuilder, indexedEntry), swap);
        // NOTE(review): compares Strings with != (reference identity, works
        // only by interning accident) and tests lang1 twice — presumably
        // ".equals" and "pair.lang2" were intended; confirm before fixing.
        if (pair.lang1 != "--" && pair.lang1 != "--") {
          pairEntry.pairs.add(pair);
      } else if (nextPrefix.equals("#:") || nextPrefix.equals("##:")){
        final Pair pair = new Pair("--", formatAndIndexExampleString(nextLine, null, indexedEntry), swap);
        lastForeign = nextLine;
        // NOTE(review): same reference-comparison / duplicated-operand issue
        // as above.
        if (pair.lang1 != "--" && pair.lang1 != "--") {
          pairEntry.pairs.add(pair);
      } else if (nextPrefix.equals("#::") || nextPrefix.equals("#**")) {
        // An English rendering of the previous foreign example: replace the
        // placeholder pair with a full (english, foreign) pair.
        if (lastForeign != null && pairEntry.pairs.size() > 0) {
          pairEntry.pairs.remove(pairEntry.pairs.size() - 1);
          final Pair pair = new Pair(formatAndIndexExampleString(nextLine, enIndexBuilder, indexedEntry), formatAndIndexExampleString(lastForeign, foreignIndexBuilder, indexedEntry), swap);
          if (pair.lang1 != "--" || pair.lang2 != "--") {
            pairEntry.pairs.add(pair);
          LOG.warning("TODO: English example with no foreign: " + title + ", " + nextLine);
          final Pair pair = new Pair("--", formatAndIndexExampleString(nextLine, null, indexedEntry), swap);
          if (pair.lang1 != "--" || pair.lang2 != "--") {
            pairEntry.pairs.add(pair);
      } else if (nextPrefix.equals("#*")) {
        // Can't really index these.
        final Pair pair = new Pair("--", formatAndIndexExampleString(nextLine, null, indexedEntry), swap);
        lastForeign = nextLine;
        if (pair.lang1 != "--" || pair.lang2 != "--") {
          pairEntry.pairs.add(pair);
      } else if (nextPrefix.equals("#::*") || nextPrefix.equals("##") || nextPrefix.equals("#*:") || nextPrefix.equals("#:*") || true) {
        final Pair pair = new Pair("--", formatAndIndexExampleString(nextLine, null, indexedEntry), swap);
        if (pair.lang1 != "--" || pair.lang2 != "--") {
          pairEntry.pairs.add(pair);
/**
 * Renders one wiki example sentence to plain display text and, when an
 * index builder is supplied, indexes each plain-text/link token under
 * EntryTypeName.WIKTIONARY_EXAMPLE.
 *
 * NOTE(review): this listing elides some original lines (source numbering
 * jumps), so several closing braces / else-arms are not visible here.
 *
 * @param example      raw wikitext of the example line
 * @param indexBuilder index to add tokens to; null disables indexing
 * @param indexedEntry entry the indexed tokens point back to
 * @return the cleaned-up text, or "--" if nothing printable remained
 */
private String formatAndIndexExampleString(final String example, final IndexBuilder indexBuilder, final IndexedEntry indexedEntry) {
  // NOTE(review): second ctor arg presumably controls newline splitting — confirm in WikiTokenizer.
  final WikiTokenizer wikiTokenizer = new WikiTokenizer(example, false);
  final StringBuilder builder = new StringBuilder();
  boolean insideTripleQuotes = false;  // toggled on ''' markup below; not consumed in visible code — TODO confirm
  while (wikiTokenizer.nextToken() != null) {
    if (wikiTokenizer.isPlainText()) {
      builder.append(wikiTokenizer.token());
      if (indexBuilder != null) {
        indexBuilder.addEntryWithString(indexedEntry, wikiTokenizer.token(), EntryTypeName.WIKTIONARY_EXAMPLE);
    } else if (wikiTokenizer.isWikiLink()) {
      // Display the link text with bold/italic quote markers stripped.
      final String text = wikiTokenizer.wikiLinkText().replaceAll("'", "");
      builder.append(text);
      if (indexBuilder != null) {
        indexBuilder.addEntryWithString(indexedEntry, text, EntryTypeName.WIKTIONARY_EXAMPLE);
    } else if (wikiTokenizer.isFunction()) {
      // Template invocations are passed through verbatim (not expanded).
      builder.append(wikiTokenizer.token());
    } else if (wikiTokenizer.isMarkup()) {
      if (wikiTokenizer.token().equals("'''")) {
        insideTripleQuotes = !insideTripleQuotes;
    } else if (wikiTokenizer.isComment() || wikiTokenizer.isNewline()) {
      // Comments/newlines are presumably skipped; the warning below looks
      // like the final else arm for unrecognized tokens — elided lines hide it.
      LOG.warning("unexpected token: " + wikiTokenizer.token());
  // Collapse internal whitespace runs; fall back to "--" placeholder.
  final String result = trim(builder.toString());
  return result.length() > 0 ? result : "--";
986 private void itConjAre(List<String> args, Map<String, String> namedArgs) {
987 final String base = args.get(0);
988 final String aux = args.get(1);
990 putIfMissing(namedArgs, "inf", base + "are");
991 putIfMissing(namedArgs, "aux", aux);
992 putIfMissing(namedArgs, "ger", base + "ando");
993 putIfMissing(namedArgs, "presp", base + "ante");
994 putIfMissing(namedArgs, "pastp", base + "ato");
996 putIfMissing(namedArgs, "pres1s", base + "o");
997 putIfMissing(namedArgs, "pres2s", base + "i");
998 putIfMissing(namedArgs, "pres3s", base + "a");
999 putIfMissing(namedArgs, "pres1p", base + "iamo");
1000 putIfMissing(namedArgs, "pres2p", base + "ate");
1001 putIfMissing(namedArgs, "pres3p", base + "ano");
1003 putIfMissing(namedArgs, "imperf1s", base + "avo");
1004 putIfMissing(namedArgs, "imperf2s", base + "avi");
1005 putIfMissing(namedArgs, "imperf3s", base + "ava");
1006 putIfMissing(namedArgs, "imperf1p", base + "avamo");
1007 putIfMissing(namedArgs, "imperf2p", base + "avate");
1008 putIfMissing(namedArgs, "imperf3p", base + "avano");
1010 putIfMissing(namedArgs, "prem1s", base + "ai");
1011 putIfMissing(namedArgs, "prem2s", base + "asti");
1012 putIfMissing(namedArgs, "prem3s", base + "ò");
1013 putIfMissing(namedArgs, "prem1p", base + "ammo");
1014 putIfMissing(namedArgs, "prem2p", base + "aste");
1015 putIfMissing(namedArgs, "prem3p", base + "arono");
1017 putIfMissing(namedArgs, "fut1s", base + "erò");
1018 putIfMissing(namedArgs, "fut2s", base + "erai");
1019 putIfMissing(namedArgs, "fut3s", base + "erà");
1020 putIfMissing(namedArgs, "fut1p", base + "eremo");
1021 putIfMissing(namedArgs, "fut2p", base + "erete");
1022 putIfMissing(namedArgs, "fut3p", base + "eranno");
1024 putIfMissing(namedArgs, "cond1s", base + "erei");
1025 putIfMissing(namedArgs, "cond2s", base + "eresti");
1026 putIfMissing(namedArgs, "cond3s", base + "erebbe");
1027 putIfMissing(namedArgs, "cond1p", base + "eremmo");
1028 putIfMissing(namedArgs, "cond2p", base + "ereste");
1029 putIfMissing(namedArgs, "cond3p", base + "erebbero");
1030 // Subjunctive / congiuntivo
1031 putIfMissing(namedArgs, "sub123s", base + "i");
1032 putIfMissing(namedArgs, "sub1p", base + "iamo");
1033 putIfMissing(namedArgs, "sub2p", base + "iate");
1034 putIfMissing(namedArgs, "sub3p", base + "ino");
1035 // Imperfect subjunctive
1036 putIfMissing(namedArgs, "impsub12s", base + "assi");
1037 putIfMissing(namedArgs, "impsub3s", base + "asse");
1038 putIfMissing(namedArgs, "impsub1p", base + "assimo");
1039 putIfMissing(namedArgs, "impsub2p", base + "aste");
1040 putIfMissing(namedArgs, "impsub3p", base + "assero");
1042 putIfMissing(namedArgs, "imp2s", base + "a");
1043 putIfMissing(namedArgs, "imp3s", base + "i");
1044 putIfMissing(namedArgs, "imp1p", base + "iamo");
1045 putIfMissing(namedArgs, "imp2p", base + "ate");
1046 putIfMissing(namedArgs, "imp3p", base + "ino");
1049 itConj(args, namedArgs);
/**
 * Shared post-processing for Italian conjugation templates.
 * NOTE(review): unimplemented stub — itConjAre delegates here after filling
 * in the regular -are defaults, but no further processing happens yet.
 */
private void itConj(List<String> args, Map<String, String> namedArgs) {
  // TODO Auto-generated method stub
1059 private static void putIfMissing(final Map<String, String> namedArgs, final String key,
1060 final String value) {
1061 final String oldValue = namedArgs.get(key);
1062 if (oldValue == null || oldValue.length() == 0) {
1063 namedArgs.put(key, value);
// TODO: check how ='' and =| are manifested....
// TODO: get this right in -are
/**
 * Puts {@code value} under {@code key} when no value exists; an existing
 * value of "''" (empty wiki-italics pair) is rewritten to the empty string.
 * NOTE(review): unlike putIfMissing, the "empty string counts as missing"
 * clause is deliberately commented out here; an "} else {" between the two
 * ifs appears to be elided from this listing — confirm against full source.
 */
private static void putOrNullify(final Map<String, String> namedArgs, final String key,
    final String value) {
  final String oldValue = namedArgs.get(key);
  if (oldValue == null/* || oldValue.length() == 0*/) {
    namedArgs.put(key, value);
  if (oldValue.equals("''")) {
    namedArgs.put(key, "");
1081 static final Pattern whitespace = Pattern.compile("\\s+");
1082 static String trim(final String s) {
1083 return whitespace.matcher(s).replaceAll(" ").trim();