gitweb.fperrin.net Git - DictionaryPC.git/commitdiff
go
author    Thad Hughes <thad.hughes@gmail.com>
Thu, 16 Dec 2010 21:15:35 +0000 (13:15 -0800)
committer Thad Hughes <thad.hughes@gmail.com>
Tue, 13 Dec 2011 18:39:43 +0000 (10:39 -0800)
src/com/hughes/android/dictionary/engine/DictionaryBuilderMain.java [new file with mode: 0644]
src/com/hughes/android/dictionary/engine/DictionaryBuilderTest.java
src/com/hughes/android/dictionary/engine/DictionaryBuilder_DE.java [deleted file]
src/com/hughes/android/dictionary/engine/DictionaryTest.java
src/com/hughes/android/dictionary/engine/IndexBuilder.java
src/com/hughes/android/dictionary/parser/DictFileParser.java
src/com/hughes/android/dictionary/parser/EnWiktionaryXmlParser.java
src/com/hughes/android/dictionary/parser/WikiCallback.java
src/com/hughes/android/dictionary/parser/WikiParser.java
src/com/hughes/android/dictionary/parser/WikiParserTest.java
src/com/hughes/android/dictionary/parser/WikiWord.java

diff --git a/src/com/hughes/android/dictionary/engine/DictionaryBuilderMain.java b/src/com/hughes/android/dictionary/engine/DictionaryBuilderMain.java
new file mode 100644
index 0000000..9a60d37
--- /dev/null
@@ -0,0 +1,161 @@
+package com.hughes.android.dictionary.engine;
+
+import java.io.File;
+import java.io.PrintWriter;
+import java.io.RandomAccessFile;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import junit.framework.TestCase;
+
+public class DictionaryBuilderMain extends TestCase {
+  
+  static class Lang {
+    final String nameRegex;
+    final String code;
+    public Lang(String nameRegex, String code) {
+      this.nameRegex = nameRegex;
+      this.code = code;
+    }
+  }
+  
+  
+  public static void main(final String[] args) throws Exception {
+    
+    Lang[] langs1 = new Lang[] { 
+        new Lang("^English$", "EN"),
+        new Lang("^German$", "DE"),
+    };
+    Lang[] langs2 = new Lang[] { 
+        new Lang("^Italian$", "IT"),
+        new Lang("^German$", "DE"),
+        new Lang("^Afrikaans$", "AF"),
+        new Lang("^Armenian$", "HY"),
+        new Lang("^Arabic$", "AR"),
+        new Lang("^Chinese$|^Mandarin$", "ZH"),
+        new Lang("^Croation$", "HR"),
+        new Lang("^Czech$", "CS"),
+        new Lang("^Dutch$", "NL"),
+        new Lang("^English$", "EN"),
+        new Lang("^Finnish$", "FI"),
+        new Lang("^French$", "FR"),
+        new Lang("^Greek$", "EL"),
+        new Lang("^Hebrew$", "HE"),
+        new Lang("^Hindi$", "HI"),
+        new Lang("^Icelandic$", "IS"),
+        new Lang("^Irish$", "GA"),
+        new Lang("^Japanese$", "JA"),
+        new Lang("^Korean$", "KO"),
+        new Lang("^Kurdish$", "KU"),
+        new Lang("^Lithuanian$", "LT"),
+        new Lang("^Malay$", "MS"),
+        new Lang("^Maori$", "MI"),
+        new Lang("^Mongolian$", "MN"),
+        new Lang("^Norwegian$", "NO"),
+        new Lang("^Persian$", "FA"),
+        new Lang("^Portuguese$", "PT"),
+        new Lang("^Romanian$", "RO"),
+        new Lang("^Russian$", "RU"),
+        new Lang("^Sanskrit$", "SA"),
+        new Lang("^Serbian$", "SR"),
+        new Lang("^Somali$", "SO"),
+        new Lang("^Spanish$", "ES"),
+        new Lang("^Sudanese$", "SU"),
+        new Lang("^Swedish$", "SV"),
+        new Lang("^Tajik$", "TG"),
+        new Lang("^Thai$", "TH"),
+        new Lang("^Tibetan$", "BO"),
+        new Lang("^Turkish$", "TR"),
+        new Lang("^Ukranian$", "UK"),
+        new Lang("^Vietnamese$", "VI"),
+        new Lang("^Welsh$", "CY"),
+        new Lang("^Yiddish$", "YI"),
+        new Lang("^Zulu$", "ZU"),
+    };
+    
+    for (final Lang lang1 : langs1) {
+      for (final Lang lang2 : langs2) {
+        if (lang1.nameRegex.equals(lang2.nameRegex)) {
+          continue;
+        }
+        
+        int enIndex = -1;
+        if (lang2.code.equals("EN")) {
+          enIndex = 2;
+        }
+        if (lang1.code.equals("EN")) {
+          enIndex = 1;
+        }
+
+        final String dictFile = String.format("dictOutputs/%s-%s_enwiktionary.quickdic", lang1.code, lang2.code);
+        System.out.println("building dictFile: " + dictFile);
+        DictionaryBuilder.main(new String[] {
+            String.format("--dictOut=%s", dictFile),
+            String.format("--lang1=%s", lang1.code),
+            String.format("--lang2=%s", lang2.code),
+            String.format("--dictInfo=(EN)Wikitionary-based %s-%s dictionary", lang1.code, lang2.code),
+
+            "--input1=dictInputs/enwiktionary-20101015-pages-articles",
+            "--input1Name=enwiktionary",
+            "--input1Format=enwiktionary",
+            String.format("--input1TranslationPattern1=%s", lang1.nameRegex),
+            String.format("--input1TranslationPattern2=%s", lang2.nameRegex),
+            String.format("--input1EnIndex=%d", enIndex),
+        });
+        
+        // Print the entries for diffing.
+        final RandomAccessFile raf = new RandomAccessFile(new File(dictFile), "r");
+        final Dictionary dict = new Dictionary(raf);
+        final PrintWriter textOut = new PrintWriter(new File(dictFile + ".txt"));
+        final List<PairEntry> sorted = new ArrayList<PairEntry>(dict.pairEntries);
+        Collections.sort(sorted);
+        for (final PairEntry pairEntry : sorted) {
+          textOut.println(pairEntry.getRawText(false));
+        }
+        textOut.close();
+        raf.close();
+
+      }  // langs2
+    }  // langs1
+    
+    DictionaryBuilder.main(new String[] {
+        "--dictOut=dictOutputs/de-en_all.quickdic",
+        "--lang1=DE",
+        "--lang2=EN",
+        "--dictInfo=@dictInputs/de-en_all.info",
+
+        "--input2=dictInputs/de-en_chemnitz.txt",
+        "--input2Name=dictcc",
+        "--input2Charset=UTF8",
+        "--input2Format=chemnitz",
+
+        "--input3=dictInputs/de-en_dictcc.txt",
+        "--input3Name=dictcc",
+        "--input3Charset=UTF8",
+        "--input3Format=dictcc",
+        
+        "--input1=dictInputs/enwiktionary-20101015-pages-articles",
+        "--input1Name=enwiktionary",
+        "--input1Format=enwiktionary",
+        "--input1TranslationPattern1=^German$",
+        "--input1TranslationPattern2=^English$",
+        "--input1EnIndex=2",
+
+    });
+
+    DictionaryBuilder.main(new String[] {
+        "--dictOut=dictOutputs/de-en_chemnitz.quickdic",
+        "--lang1=DE",
+        "--lang2=EN",
+        "--dictInfo=@dictInputs/de-en_chemnitz.info",
+
+        "--input1=dictInputs/de-en_chemnitz.txt",
+        "--input1Name=dictcc",
+        "--input1Charset=UTF8",
+        "--input1Format=chemnitz",
+    });
+
+  }
+  
+}
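
For reference, the nested loops above build one enwiktionary-based dictionary per (lang1, lang2) combination and skip same-language pairs, so the two base languages against the 44-entry langs2 list yield 86 .quickdic outputs. A trimmed, standalone sketch of just that enumeration (the class and shortened list below are illustrative copies, not part of the commit):

import java.util.Arrays;
import java.util.List;

public class PairEnumerationSketch {
  static class Lang {
    final String nameRegex;
    final String code;
    Lang(final String nameRegex, final String code) {
      this.nameRegex = nameRegex;
      this.code = code;
    }
  }

  public static void main(final String[] args) {
    final List<Lang> langs1 = Arrays.asList(new Lang("^English$", "EN"), new Lang("^German$", "DE"));
    // Only the first few langs2 entries are repeated here; the real list above has 44.
    final List<Lang> langs2 = Arrays.asList(
        new Lang("^Italian$", "IT"), new Lang("^German$", "DE"), new Lang("^English$", "EN"));
    int built = 0;
    for (final Lang lang1 : langs1) {
      for (final Lang lang2 : langs2) {
        if (lang1.nameRegex.equals(lang2.nameRegex)) {
          continue;  // never build EN-EN or DE-DE
        }
        System.out.println(String.format("dictOutputs/%s-%s_enwiktionary.quickdic", lang1.code, lang2.code));
        ++built;
      }
    }
    // Prints 4 with the trimmed list; 2*44 - 2 = 86 with the full list above.
    System.out.println(built + " dictionaries");
  }
}
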
diff --git a/src/com/hughes/android/dictionary/engine/DictionaryBuilderTest.java b/src/com/hughes/android/dictionary/engine/DictionaryBuilderTest.java
index 903f327c4943be8c96d100756baaa4f3b3bccdfc..0a1b479e32f198bb571d235d71a50607d50b9b2f 100644
@@ -23,7 +23,7 @@ public class DictionaryBuilderTest extends TestCase {
         "--input3=testdata/enwiktionary_small.xml",
         "--input3Name=enwiktionary",
         "--input3Format=enwiktionary",
-        "--input3TranslationPattern1=German|Italian|Spanish|French|Japanese|Arabic|Mandarin",
+        "--input3TranslationPattern1=German|Italian|Spanish|French|Japanese|Arabic|Mandarin|Korean|Latin|Swedish|Croation|Serbian|Dutch|Afrikaans",
         "--input3TranslationPattern2=English",
         "--input3EnIndex=2",
 
@@ -45,7 +45,6 @@ public class DictionaryBuilderTest extends TestCase {
 
   
   public void testGermanCombined() throws Exception {
-    if (1==1) throw new RuntimeException();
     final File result = new File("testdata/de-en.quickdic");
     System.out.println("Writing to: " + result);
     DictionaryBuilder.main(new String[] {
diff --git a/src/com/hughes/android/dictionary/engine/DictionaryBuilder_DE.java b/src/com/hughes/android/dictionary/engine/DictionaryBuilder_DE.java
deleted file mode 100644
index 878a24f..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-package com.hughes.android.dictionary.engine;
-
-import junit.framework.TestCase;
-
-public class DictionaryBuilder_DE extends TestCase {
-  
-  public static void main(final String[] args) throws Exception {
-    
-    DictionaryBuilder.main(new String[] {
-        "--dictOut=dictOutputs/de-en_chemnitz.quickdic",
-        "--lang1=DE",
-        "--lang2=EN",
-        "--dictInfo=@dictInputs/de-en_chemnitz.info",
-
-        "--input1=dictInputs/de-en_chemnitz.txt",
-        "--input1Name=dictcc",
-        "--input1Charset=UTF8",
-        "--input1Format=chemnitz",
-    });
-
-    DictionaryBuilder.main(new String[] {
-        "--dictOut=dictOutputs/de-en_all.quickdic",
-        "--lang1=DE",
-        "--lang2=EN",
-        "--dictInfo=@dictInputs/de-en_all.info",
-
-        "--input1=dictInputs/de-en_chemnitz.txt",
-        "--input1Name=dictcc",
-        "--input1Charset=UTF8",
-        "--input1Format=chemnitz",
-
-        "--input2=dictInputs/de-en_dictcc.txt",
-        "--input2Name=dictcc",
-        "--input2Charset=UTF8",
-        "--input2Format=dictcc",
-    });
-
-  }
-  
-}
diff --git a/src/com/hughes/android/dictionary/engine/DictionaryTest.java b/src/com/hughes/android/dictionary/engine/DictionaryTest.java
index 388e71d5a069ec066b4ea0c9e5b50c8321e76dc2..87103eba3059f5b681dab38ff82dafbcb8d861ee 100644
@@ -28,7 +28,7 @@ public class DictionaryTest extends TestCase {
   }
   
   public void testGermanIndex() throws IOException {
-    final RandomAccessFile raf = new RandomAccessFile("testdata/de-en.dict", "r");
+    final RandomAccessFile raf = new RandomAccessFile("testdata/de-en.quickdic", "r");
     final Dictionary dict = new Dictionary(raf);
     final Index deIndex = dict.indices.get(0);
     
diff --git a/src/com/hughes/android/dictionary/engine/IndexBuilder.java b/src/com/hughes/android/dictionary/engine/IndexBuilder.java
index 0d6a3d938f87d8293756a0b1b9966e1459da85c9..9721fd3746b265f088d1512537e183ae3991aa6b 100644
@@ -95,6 +95,13 @@ public class IndexBuilder {
     }
     return entries;
   }
+
+  public void addEntryWithTokens(final EntryData entryData, final Set<String> tokens,
+      final EntryTypeName entryTypeName) {
+    for (final String token : tokens) {
+      getOrCreateEntries(token, entryTypeName).add(entryData);
+    }    
+  }
   
 
 }
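
The new addEntryWithTokens helper files one EntryData under every token in a set, reusing getOrCreateEntries for the per-token entry lists. A minimal, self-contained sketch of that token-to-entries pattern, with standard-library types standing in for EntryData and EntryTypeName (all names below are hypothetical):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class TokenIndexSketch {
  // Stands in for IndexBuilder's token -> entries map.
  final Map<String, List<String>> index = new LinkedHashMap<String, List<String>>();

  List<String> getOrCreateEntries(final String token) {
    List<String> entries = index.get(token);
    if (entries == null) {
      entries = new ArrayList<String>();
      index.put(token, entries);
    }
    return entries;
  }

  // Same shape as addEntryWithTokens: one entry is filed under every token.
  void addEntryWithTokens(final String entryData, final Set<String> tokens) {
    for (final String token : tokens) {
      getOrCreateEntries(token).add(entryData);
    }
  }

  public static void main(final String[] args) {
    final TokenIndexSketch sketch = new TokenIndexSketch();
    sketch.addEntryWithTokens("entry#1", new LinkedHashSet<String>(Arrays.asList("gehen", "ging")));
    System.out.println(sketch.index);  // {gehen=[entry#1], ging=[entry#1]}
  }
}
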
diff --git a/src/com/hughes/android/dictionary/parser/DictFileParser.java b/src/com/hughes/android/dictionary/parser/DictFileParser.java
index 1e01ae2bf9b2e4be43b44c400137a5649af8061c..2f53fdcf36f130605904c54bcb1a5084cd21ff1b 100644
@@ -6,7 +6,10 @@ import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.nio.charset.Charset;
+import java.util.Arrays;
+import java.util.LinkedHashSet;
 import java.util.List;
+import java.util.Set;
 import java.util.logging.Logger;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
@@ -263,6 +266,13 @@ public class DictFileParser {
     }
     return field;
   }
+  
+  public static final Set<String> tokenize(final String text, final Pattern pattern) {
+    final String[] split = pattern.split(text);
+    final Set<String> result = new LinkedHashSet<String>(Arrays.asList(split));
+    result.remove("");
+    return result;
+  }
 
 
 }
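
tokenize splits on the supplied pattern, keeps first-seen order by collecting into a LinkedHashSet, and drops the empty token that a leading delimiter produces. A runnable sketch; the NON_CHAR pattern callers pass is not shown in this diff, so the pattern below is only an assumed stand-in:

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.regex.Pattern;

public class TokenizeSketch {
  // Same body as the new DictFileParser.tokenize.
  public static Set<String> tokenize(final String text, final Pattern pattern) {
    final String[] split = pattern.split(text);
    final Set<String> result = new LinkedHashSet<String>(Arrays.asList(split));
    result.remove("");  // a leading delimiter yields one empty token
    return result;
  }

  public static void main(final String[] args) {
    // Assumed stand-in for DictFileParser.NON_CHAR: split on anything that is not a letter.
    final Pattern nonChar = Pattern.compile("[^\\p{L}]+");
    System.out.println(tokenize(" to go; went, gone ", nonChar));
    // [to, go, went, gone] -- order kept, empty token dropped, duplicates would collapse
  }
}
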
diff --git a/src/com/hughes/android/dictionary/parser/EnWiktionaryXmlParser.java b/src/com/hughes/android/dictionary/parser/EnWiktionaryXmlParser.java
index 51d63c8025a2744816ff4bdd1339e2f5c489d258..d5b90067c471774a18ec59992bece5ecc7bda1ce 100644
@@ -19,7 +19,10 @@ import org.xml.sax.SAXException;
 
 import com.hughes.android.dictionary.engine.DictionaryBuilder;
 import com.hughes.android.dictionary.engine.IndexBuilder;
-import com.hughes.android.dictionary.parser.WikiWord.TranslationSection;
+import com.hughes.android.dictionary.parser.WikiWord.FormOf;
+import com.hughes.android.dictionary.parser.WikiWord.Translation;
+import com.hughes.util.ListUtil;
+import com.hughes.util.StringUtil;
 
 public class EnWiktionaryXmlParser extends org.xml.sax.helpers.DefaultHandler implements WikiCallback {
   
@@ -30,25 +33,30 @@ public class EnWiktionaryXmlParser extends org.xml.sax.helpers.DefaultHandler im
       "Ligature|Idiom|Phrase|" +
       // These are @deprecated:
       "Noun form|Verb form|Adjective form|Nominal phrase|Noun phrase|" +
-      "Verb phrase|Transitive verb|Intransitive verb|Reflexive verb");
+      "Verb phrase|Transitive verb|Intransitive verb|Reflexive verb|" +
+      // These are extras I found:
+      "Determiner|Numeral|Number|Cardinal number|Ordinal number|Proverb|" +
+      "Particle|Interjection|Pronominal adverb" +
+      "Han character|Hanzi|Hanja|Kanji|Katakana character|Syllable");
 
   static final Pattern wikiMarkup =  Pattern.compile("\\[\\[|\\]\\]|''+");
 
-
-  final DictionaryBuilder dict;
+  final DictionaryBuilder dictBuilder;
   
   final IndexBuilder[] indexBuilders;
   final Pattern[] langPatterns;
+  final int enIndexBuilder;
 
   StringBuilder titleBuilder;
   StringBuilder textBuilder;
   StringBuilder currentBuilder = null;
 
-  public EnWiktionaryXmlParser(final DictionaryBuilder builder, final Pattern[] langPatterns, final int enIndexBuilder) {
+  public EnWiktionaryXmlParser(final DictionaryBuilder dictBuilder, final Pattern[] langPatterns, final int enIndexBuilder) {
     assert langPatterns.length == 2;
-    this.dict = builder;
-    this.indexBuilders = dict.indexBuilders.toArray(new IndexBuilder[0]);
+    this.dictBuilder = dictBuilder;
+    this.indexBuilders = dictBuilder.indexBuilders.toArray(new IndexBuilder[0]);
     this.langPatterns = langPatterns;
+    this.enIndexBuilder = enIndexBuilder;
   }
 
   @Override
@@ -90,30 +98,44 @@ public class EnWiktionaryXmlParser extends org.xml.sax.helpers.DefaultHandler im
     parser.parse(file, this);
   }
   
+  int pageCount = 0;
   private void endPage() {
     title = titleBuilder.toString();
+    ++pageCount;
+    if (pageCount % 1000 == 0) {
+      System.out.println("pageCount=" + pageCount);
+    }
+    if (title.startsWith("Wiktionary:") ||
+        title.startsWith("Template:") ||
+        title.startsWith("Appendix:") ||
+        title.startsWith("Category:") ||
+        title.startsWith("Index:") ||
+        title.startsWith("MediaWiki:") ||
+        title.startsWith("TransWiki:") ||
+        title.startsWith("Citations:") ||
+        title.startsWith("Concordance:") ||
+        title.startsWith("Help:")) {
+      return;
+    }
     currentDepth = 0;
     words.clear();
     currentHeading = null;
-    WikiParser.parse(textBuilder.toString(), this);
+    insidePartOfSpeech = false;
+//    System.err.println("Working on page: " + title);
+    try {
+      WikiParser.parse(textBuilder.toString(), this);
+    } catch (Throwable e) {
+      System.err.println("Failure on page: " + title);
+      e.printStackTrace(System.err); 
+    }
 
    for (final WikiWord word : words) {
-     System.out.println("\n" + title + ", " + word.language + ", pron=" + word.accentToPronunciation);
-     if (word.partsOfSpeech.isEmpty() && title.indexOf(":") == -1) {
-       System.err.println("Word with no POS: " + title);
-     }
-     for (final WikiWord.PartOfSpeech partOfSpeech : word.partsOfSpeech) {
-       System.out.println("  pos: " + partOfSpeech.name);
-       
-       for (final TranslationSection translationSection : partOfSpeech.translationSections) {
-         System.out.println("    sense: " + translationSection.sense);
-         
-       }
-     }
-   }
-  }
+     word.wikiWordToQuickDic(dictBuilder, enIndexBuilder);
+   }  // WikiWord
+   
+  }  // endPage()
+
 
-  
   // ------------------------------------------------------------------------
   // ------------------------------------------------------------------------
   // ------------------------------------------------------------------------
@@ -147,7 +169,8 @@ public class EnWiktionaryXmlParser extends org.xml.sax.helpers.DefaultHandler im
   final List<WikiWord> words = new ArrayList<WikiWord>();
   WikiWord currentWord;
   WikiWord.PartOfSpeech currentPartOfSpeech;
-  WikiWord.TranslationSection currentTranslationSection;
+  WikiWord.TranslationSense currentTranslationSense;
+  boolean insidePartOfSpeech;
   
   StringBuilder wikiBuilder = null;
   
@@ -166,82 +189,202 @@ public class EnWiktionaryXmlParser extends org.xml.sax.helpers.DefaultHandler im
       "zh-tsp", "zh-zh-p"));
   static final Set<String> ignoreTemplates = new LinkedHashSet<String>(Arrays.asList(""));
   static final Set<String> grammarTemplates = new LinkedHashSet<String>(Arrays.asList("impf", "pf"));
+  static final Set<String> passThroughTemplates = new LinkedHashSet<String>(Arrays.asList("zzzzzzzzzzzzzzz"));
 
   @Override
   public void onTemplate(final List<String> positionalArgs, final Map<String,String> namedArgs) {
+    if (positionalArgs.isEmpty()) {
+      // This happens very rarely with special templates.
+      return;
+    }
     final String name = positionalArgs.get(0);
+    
+    namedArgs.remove("lang");
+    namedArgs.remove("nocat");
+    namedArgs.remove("sc");
 
     // Pronunciation
-    if (name.equals("a")) {
-      // accent tag
-      currentWord.currentPronunciation = new StringBuilder();
-      currentWord.accentToPronunciation.put(positionalArgs.get(1), currentWord.currentPronunciation);
-      return;
-    }
-    if (name.equals("IPA") || name.equals("SAMPA") || name.equals("enPR") || name.equals("rhymes")) {
-      namedArgs.remove("lang");
-      assert positionalArgs.size() >= 2 && namedArgs.isEmpty() : positionalArgs.toString() + namedArgs.toString(); 
-      if (currentWord.currentPronunciation == null) {
+    if (currentWord != null) {
+      if (name.equals("a")) {
+        // accent tag
         currentWord.currentPronunciation = new StringBuilder();
-        currentWord.accentToPronunciation.put("", currentWord.currentPronunciation);
+        currentWord.accentToPronunciation.put(positionalArgs.get(1), currentWord.currentPronunciation);
+        return;
       }
-      currentWord.currentPronunciation.append(name).append(": ");
-      for (int i = 1; i < positionalArgs.size(); ++i) {
-        if (i > 1) {
-          currentWord.currentPronunciation.append(", ");
+      
+      if (name.equals("IPA") || name.equals("SAMPA") || name.equals("X-SAMPA")|| name.equals("enPR")) {
+        namedArgs.remove("lang");
+        for (int i = 0; i < 100 && !namedArgs.isEmpty(); ++i) {
+          final String pron = namedArgs.remove("" + i);
+          if (pron != null) {
+            positionalArgs.add(pron);
+          } else {
+            if (i > 10) {
+              break;
+            }
+          }
+        }
+        if (!(positionalArgs.size() >= 2 && namedArgs.isEmpty())) {
+          System.err.println("Invalid pronunciation: " + positionalArgs.toString() + namedArgs.toString());
+        }
+        if (currentWord.currentPronunciation == null) {
+          currentWord.currentPronunciation = new StringBuilder();
+          currentWord.accentToPronunciation.put("", currentWord.currentPronunciation);
         }
-        final String pron = wikiMarkup.matcher(positionalArgs.get(1)).replaceAll("");
-        currentWord.currentPronunciation.append(pron).append("");
+        if (currentWord.currentPronunciation.length() > 0) {
+          currentWord.currentPronunciation.append("; ");
+        }
+        for (int i = 1; i < positionalArgs.size(); ++i) {
+          if (i > 1) {
+            currentWord.currentPronunciation.append(",");
+          }
+          final String pron = wikiMarkup.matcher(positionalArgs.get(i)).replaceAll("");
+          currentWord.currentPronunciation.append(pron).append("");
+        }
+        currentWord.currentPronunciation.append(" (").append(name).append(")");
+        return;
       }
+      
+      if (name.equals("qualifier")) {
+        //assert positionalArgs.size() == 2 && namedArgs.isEmpty() : positionalArgs.toString() + namedArgs.toString();
+        if (wikiBuilder == null) {
+          return;
+        }
+        wikiBuilder.append(" (").append(positionalArgs.get(1)).append(")");
+        return;
+      }
+      
+      if (name.equals("...")) {
+        // Skipping any elided text for brevity.
+        wikiBuilder.append("...");
+        return;
+      }
+      
+      if (passThroughTemplates.contains(name)) {
+        assert positionalArgs.size() == 1 && namedArgs.isEmpty() : positionalArgs.toString() + namedArgs;
+        wikiBuilder.append(name);
+        return;
+      }
+      
+      if (name.equals("audio") || name.equals("rhymes") || name.equals("hyphenation")) {
+        return;
+      }
+      
+      if ("Pronunciation".equals(currentHeading)) {
+        System.err.println("Unhandled pronunciation template: " + positionalArgs + namedArgs);
+        return;
+      }
+    }  // Pronunciation
+    
+    // Part of speech
+    if (insidePartOfSpeech) {
+      
+      // form of
+      if (name.equals("form of")) {
+        namedArgs.remove("sc");
+        if (positionalArgs.size() < 3 || positionalArgs.size() > 4) {
+          System.err.println("Invalid form of.");
+        }
+        final String token = positionalArgs.get(positionalArgs.size() == 3 ? 2 : 3);
+        final String grammarForm = WikiParser.simpleParse(positionalArgs.get(1));
+        currentPartOfSpeech.formOfs.add(new FormOf(grammarForm, token));
+        return;
+      }
+      
+      // The fallback plan: append the template!
+      if (wikiBuilder != null) {
+        wikiBuilder.append("{");
+        boolean first = true;
+        for (final String arg : positionalArgs) {
+          if (!first) {
+            wikiBuilder.append(", ");
+          }
+          first = false;
+          wikiBuilder.append(arg);
+        }
+        // This one isn't so useful.
+        for (final Map.Entry<String, String> entry : namedArgs.entrySet()) {
+          if (!first) {
+            wikiBuilder.append(", ");
+          }
+          first = false;
+          wikiBuilder.append(entry.getKey()).append("=").append(entry.getValue());
+        }
+        wikiBuilder.append("}");
+      }
+      
+      //System.err.println("Unhandled part of speech template: " + positionalArgs + namedArgs);
       return;
-    }
-    if (name.equals("audio")) {
-      return;
-    }
-    if ("Pronunciation".equals(currentHeading)) {
-      System.err.println("Unhandled template: " + name);
-    }
+    }  // Part of speech
 
+    
     // Translations
     if (name.equals("trans-top")) {
-      assert positionalArgs.size() == 2 && namedArgs.isEmpty();
-      currentTranslationSection = new WikiWord.TranslationSection();
-      currentPartOfSpeech.translationSections.add(currentTranslationSection);
+      assert positionalArgs.size() >= 1 && namedArgs.isEmpty() : positionalArgs.toString() + namedArgs + title;
+      
+      if (currentPartOfSpeech == null) {
+        assert !currentWord.partsOfSpeech.isEmpty() : title; 
+        System.err.println("Assuming last part of speech for non-nested translation section: " + title);
+        currentPartOfSpeech = ListUtil.getLast(currentWord.partsOfSpeech);
+      }
+      
+      currentTranslationSense = new WikiWord.TranslationSense();
+      currentPartOfSpeech.translationSenses.add(currentTranslationSense);
       if (positionalArgs.size() > 1) {
-        currentTranslationSection.sense = positionalArgs.get(1);
+        currentTranslationSense.sense = positionalArgs.get(1);
       }
       return;
-    }
+    }  // Translations
 
     if (wikiBuilder == null) {
       return;
     }    
-    if (name == "") {
-    } else  if (name.equals("m") || name.equals("f") || name.equals("n") || name.equals("c")) {
-      wikiBuilder.append("{").append(name).append("}");
+    if (name.equals("m") || name.equals("f") || name.equals("n") || name.equals("c")) {
+      assert positionalArgs.size() >= 1 && namedArgs.isEmpty() : positionalArgs.toString() + namedArgs.toString();
+      wikiBuilder.append("{");
+      for (int i = 1; i < positionalArgs.size(); ++i) {
+        wikiBuilder.append(i > 1 ? "," : "");
+        wikiBuilder.append(positionalArgs.get(i));
+      }
+      wikiBuilder.append(name).append("}");
+      
     } else  if (name.equals("p")) {
+      assert positionalArgs.size() == 1 && namedArgs.isEmpty();
       wikiBuilder.append("pl.");
+
     } else  if (name.equals("s")) {
+      assert positionalArgs.size() == 1 && namedArgs.isEmpty() || title.equals("dobra");
       wikiBuilder.append("sg.");
+      
     } else  if (grammarTemplates.contains(name)) {
+      assert positionalArgs.size() == 1 && namedArgs.isEmpty();
       wikiBuilder.append(name).append(".");
+
     } else  if (name.equals("l")) {
+      // This template is designed to generate a link to a specific language-section on the target page.
       wikiBuilder.append(positionalArgs.size() >= 4 ? positionalArgs.get(3) : positionalArgs.get(2));
+      
     } else if (name.equals("t") || name.equals("t+") || name.equals("t-") || name.equals("tø")) {
-      if (positionalArgs.size() >= 2) {
-        wikiBuilder.append(positionalArgs.get(1));
+      if (positionalArgs.size() > 2) {
+        wikiBuilder.append(positionalArgs.get(2));
       }
-      if (positionalArgs.size() >= 3) {
-        wikiBuilder.append(" {").append(positionalArgs.get(1)).append("}");
+      for (int i = 3; i < positionalArgs.size(); ++i) {
+        wikiBuilder.append(i == 3 ? " {" : ",");
+        wikiBuilder.append(positionalArgs.get(i));
+        wikiBuilder.append(i == positionalArgs.size() - 1 ? "}" : "");
       }
       final String transliteration = namedArgs.remove("tr");
       if (transliteration != null) {
         wikiBuilder.append(" (").append(transliteration).append(")");
       }
+      
     } else  if (name.equals("trreq")) {
       wikiBuilder.append("{{trreq}}");
+      
     } else if (name.equals("qualifier")) {
+      //assert positionalArgs.size() == 2 && namedArgs.isEmpty() : positionalArgs.toString() + namedArgs.toString();
       wikiBuilder.append(" (").append(positionalArgs.get(1)).append(")");
+      
     } else if (useRemainingArgTemplates.contains(name)) {
       for (int i = 1; i < positionalArgs.size(); ++i) {
         if (i != 1) {
@@ -250,11 +393,20 @@ public class EnWiktionaryXmlParser extends org.xml.sax.helpers.DefaultHandler im
         wikiBuilder.append(positionalArgs.get(i));
       }
     } else if (ignoreTemplates.contains(name)) {
+      // Do nothing.
+      
     } else if (name.equals("initialism")) {
+      assert positionalArgs.size() <= 2 && namedArgs.isEmpty() : positionalArgs.toString() + namedArgs;
       wikiBuilder.append("Initialism");
+    } else if (name.equals("abbreviation")) {
+      assert positionalArgs.size() <= 2 && namedArgs.isEmpty() : positionalArgs.toString() + namedArgs;
+      wikiBuilder.append("Abbreviation");
+    } else if (name.equals("acronym")) {
+      assert positionalArgs.size() <= 2 && namedArgs.isEmpty() : positionalArgs.toString() + namedArgs;
+      wikiBuilder.append("Acronym");
     } else {
-      if (currentTranslationSection != null) {
-        System.err.println("Unhandled template: " + name);
+      if (currentTranslationSense != null) {
+        System.err.println("Unhandled template: " + positionalArgs.toString() + namedArgs);
       }
     }
   }
@@ -273,26 +425,31 @@ public class EnWiktionaryXmlParser extends org.xml.sax.helpers.DefaultHandler im
     currentDepth = depth;
     if (currentPartOfSpeech != null && depth <= currentPartOfSpeech.depth) {
       currentPartOfSpeech = null;
+      insidePartOfSpeech = false;
     }
     if (currentWord != null && depth <= currentWord.depth) {
       currentWord = null;
     }
+    
+    currentHeading = null;
   }
   
   @Override
   public void onHeadingEnd(int depth) {
     final String name = wikiBuilder.toString().trim();
     wikiBuilder = null;
-    currentTranslationSection = null;
+    currentTranslationSense = null;
     currentHeading = name;
     
-    final boolean lang1 = langPatterns[0].matcher(name).matches();
-    final boolean lang2 = langPatterns[1].matcher(name).matches();
-    if (name.equalsIgnoreCase("English") || lang1 || lang2) {
-      currentWord = new WikiWord(depth);
+    final boolean lang0 = langPatterns[0].matcher(name).matches();
+    final boolean lang1 = langPatterns[1].matcher(name).matches();
+    if (name.equalsIgnoreCase("English") || lang0 || lang1 || name.equalsIgnoreCase("Translingual")) {
+      currentWord = new WikiWord(title, depth);
+      if (lang0 && lang1) {
+        System.err.println("Word is indexed in both index1 and index2: " + title);
+      }
       currentWord.language = name;
-      currentWord.isLang1 = lang1;
-      currentWord.isLang2 = lang2;
+      currentWord.index = lang0 ? 0 : (lang1 ? 1 : -1);
       words.add(currentWord);
       return;
     }
@@ -301,9 +458,15 @@ public class EnWiktionaryXmlParser extends org.xml.sax.helpers.DefaultHandler im
       return;
     }
     
-    if (partOfSpeechHeader.matcher(name).matches()) {
+    if (currentPartOfSpeech != null && depth <= currentPartOfSpeech.depth) {
+      currentPartOfSpeech = null;
+    }
+    
+    insidePartOfSpeech = false;
+    if (currentPartOfSpeech == null && partOfSpeechHeader.matcher(name).matches()) {
       currentPartOfSpeech = new WikiWord.PartOfSpeech(depth, name);
       currentWord.partsOfSpeech.add(currentPartOfSpeech);
+      insidePartOfSpeech = true;
       return;
     }
     
@@ -311,24 +474,12 @@ public class EnWiktionaryXmlParser extends org.xml.sax.helpers.DefaultHandler im
       if (currentWord == null || 
           !currentWord.language.equals("English") || 
           currentPartOfSpeech == null) {
-        System.out.println("Unexpected Translations section: " + title);
+        System.err.println("Unexpected Translations section: " + title);
         return;
       }
-      currentTranslationSection = new WikiWord.TranslationSection();
-      currentPartOfSpeech.translationSections.add(currentTranslationSection);
+      currentTranslationSense = new WikiWord.TranslationSense();
     }
     
-    if (name.equals("Translations")) {
-      if (currentWord == null || 
-          !currentWord.language.equals("English") || 
-          currentPartOfSpeech == null) {
-        System.out.println("Unexpected Translations section: " + title);
-        return;
-      }
-      currentTranslationSection = new WikiWord.TranslationSection();
-      currentPartOfSpeech.translationSections.add(currentTranslationSection);
-    }
-
   }
 
   @Override
@@ -342,27 +493,113 @@ public class EnWiktionaryXmlParser extends org.xml.sax.helpers.DefaultHandler im
 
   @Override
   public void onListItemEnd(String header, int[] section) {
-    final String item = wikiBuilder.toString();
+    String item = wikiBuilder.toString().trim();
+    final String oldItem = item;
+    if (item.length() == 0) {
+      return;
+    }
+    item = WikiParser.simpleParse(item);
     wikiBuilder = null;
-    
-    if (item.indexOf("{{trreq}}") != -1) {
+        
+    // Part of speech
+    if (insidePartOfSpeech) {
+      assert currentPartOfSpeech != null : title + item;
+      if (header.equals("#") || 
+          header.equals("##") || 
+          header.equals("###") || 
+          header.equals("####") || 
+          header.equals(":#") || 
+          header.equals("::") ||
+          header.equals(":::*")) {
+        // Definition.
+        // :: should append, probably.
+        currentPartOfSpeech.newMeaning().meaning = item;
+        
+      // Source
+      } else if (header.equals("#*") ||
+                 header.equals("##*") ||
+                 header.equals("###*")) {
+        currentPartOfSpeech.lastMeaning().newExample().source = item;
+        
+      // Example
+      } else if (header.equals("#:") || 
+                 header.equals("#*:") || 
+                 header.equals("#:*") || 
+                 header.equals("##:") || 
+                 header.equals("##*:") || 
+                 header.equals("#:*:") || 
+                 header.equals("#:*#") ||
+                 header.equals("#*:") ||
+                 header.equals("*:") || 
+                 header.equals("#:::") ||
+                 header.equals("#**") ||
+                 header.equals("#*:::") ||
+                 header.equals("#:#") ||
+                 header.equals(":::") ||
+                 header.equals("##:*") ||
+                 header.equals("###*:")) {
+        StringUtil.appendLine(currentPartOfSpeech.lastMeaning().newExample().example, item);
+        
+      // Example in English
+      } else if (header.equals("#::") || 
+                 header.equals("#*::") || 
+                 header.equals("#:**") ||
+                 header.equals("#*#") ||
+                 header.equals("##*::")) {
+        StringUtil.appendLine(currentPartOfSpeech.lastMeaning().lastExample().exampleInEnglish, item);
+        
+      // Skip
+      } else if (header.equals("*") ||
+                 header.equals("**") ||
+                 header.equals("***") || 
+                 header.equals("*#") ||
+                 header.equals(":") ||
+                 header.equals("::*") ||
+                 header.equals("#**") ||
+                 header.equals(":*") ||
+                 header.equals("#*:*") ||
+                 header.equals("#*:**") || 
+                 header.equals("#*:#") || 
+                 header.equals("#*:*:") || 
+                 header.equals("#*:*") || 
+                 header.equals(";")) {
+        // might have: * {{seeCites}}
+        // * [[w:Arabic numerals|Arabic numerals]]: 2
+        //assert item.trim().length() == 0;
+        System.err.println("Skipping meaning: " + header + " " + item);
+      } else {
+        if (title.equals("Yellowknife")) {
+          return;
+        }
+        System.err.println("Busted heading: " + title + "  "+ header + " " + item);
+      }
       return;
     }
+    // Part of speech
     
-    if (currentTranslationSection != null) {
+    // Translation
+    if (currentTranslationSense != null) {
+      if (item.indexOf("{{[trreq]{}}}") != -1) {
+        return;
+      }
+
+      if (currentPartOfSpeech.translationSenses.isEmpty()) {
+        currentPartOfSpeech.translationSenses.add(currentTranslationSense);
+      }
+
       final int colonPos = item.indexOf(':');
       if (colonPos == -1) {
-        System.err.println("Invalid translation: " + item);
+        System.err.println("Invalid translation: title=" + title +  ",  item=" + item);
         return;
       }
       final String lang = item.substring(0, colonPos);
-      final String trans = item.substring(colonPos + 1);
+      final String trans = item.substring(colonPos + 1).trim();
       for (int i = 0; i < 2; ++i) {
         if (langPatterns[i].matcher(lang).find()) {
-          currentTranslationSection.translations.get(i).add(trans);
+          currentTranslationSense.translations.get(i).add(new Translation(lang, trans));
         }
       }
-    }
+    } // Translation
   }
 
   @Override
@@ -389,7 +626,7 @@ public class EnWiktionaryXmlParser extends org.xml.sax.helpers.DefaultHandler im
 
   @Override
   public void onUnterminated(String start, String rest) {
-    throw new RuntimeException(rest);
+    throw new RuntimeException(start + rest);
   }
   @Override
   public void onInvalidHeaderEnd(String rest) {
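
Among the onTemplate changes above, the {{t}}/{{t+}}/{{t-}}/{{tø}} branch now takes the translated word from the third positional argument (index 2, after the template name and language code) and folds any further positional arguments into a {...} annotation, followed by an optional tr= transliteration. A standalone sketch of just that formatting step (the helper below is hypothetical and mirrors the new branch):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class TranslationTemplateSketch {
  // Mirrors the new {{t}}/{{t+}}/{{t-}}/{{tø}} branch of onTemplate().
  static String render(final List<String> positionalArgs, final Map<String, String> namedArgs) {
    final StringBuilder out = new StringBuilder();
    if (positionalArgs.size() > 2) {
      out.append(positionalArgs.get(2));           // the translated word itself
    }
    for (int i = 3; i < positionalArgs.size(); ++i) {
      out.append(i == 3 ? " {" : ",");             // open the gender/number annotation
      out.append(positionalArgs.get(i));
      out.append(i == positionalArgs.size() - 1 ? "}" : "");
    }
    final String transliteration = namedArgs.remove("tr");
    if (transliteration != null) {
      out.append(" (").append(transliteration).append(")");
    }
    return out.toString();
  }

  public static void main(final String[] args) {
    // {{t+|de|Haus|n}} renders as "Haus {n}".
    System.out.println(render(new ArrayList<String>(Arrays.asList("t+", "de", "Haus", "n")),
        new HashMap<String, String>()));
  }
}
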
diff --git a/src/com/hughes/android/dictionary/parser/WikiCallback.java b/src/com/hughes/android/dictionary/parser/WikiCallback.java
index ad009754f3ce8a303c36f4bf8d6da1636284e795..c3ba503f0673ba8e2badb00ee18c5394e7239362 100644
@@ -32,5 +32,5 @@ public interface WikiCallback {
   // Errors
   void onUnterminated(final String start, String rest);
   void onInvalidHeaderEnd(String rest);
-  
+    
 }
diff --git a/src/com/hughes/android/dictionary/parser/WikiParser.java b/src/com/hughes/android/dictionary/parser/WikiParser.java
index 8dd486439a9c993291bbcc1a929497769488f7d8..5b73b872b71dfc47d4cacbdf5dfbf7919cc91b36 100644
@@ -35,10 +35,12 @@ public class WikiParser {
           String text = rest.substring(0, nextMarkupPos);
           whitespace.matcher(text).replaceAll(" ");
           callback.onText(text);
+          rest = rest.substring(nextMarkupPos);
         }
-        rest = rest.substring(nextMarkupPos);
         
-        if (rest.startsWith("\n")) {
+        if (rest.equals("")) {
+          continue;
+        } else if (rest.startsWith("\n")) {
           rest = rest.substring(1);
           
           if (insideHeaderDepth != -1) {
@@ -50,14 +52,16 @@ public class WikiParser {
           
           final Matcher headerMatcher = headerStart.matcher(rest);
           if (headerMatcher.find()) {
+            lastListItem = null;
             insideHeaderDepth = headerMatcher.group().length();            
             callback.onHeadingStart(insideHeaderDepth);
             rest = rest.substring(headerMatcher.group().length());
             continue;
           }
-          
-          if (listStart.matcher(rest).find()) {
-            lastListItem = matcher.group();
+
+          final Matcher listStartMatcher = listStart.matcher(rest);
+          if (listStartMatcher.find()) {
+            lastListItem = listStartMatcher.group();
             callback.onListItemStart(lastListItem, null);
             rest = rest.substring(lastListItem.length());
             continue;
@@ -86,15 +90,22 @@ public class WikiParser {
             return;
           }
           final String template = rest.substring(2, end).trim();
-          final String[] templateArray = pipeSplit.split(template);
+          //todo: this doesn't work.  can't split pipes inside [[asdf|asdf]]
+          final List<String> templateArray = new ArrayList<String>();
+          contextSensitivePipeSplit(template, templateArray);
           positionalArgs.clear();
           namedArgs.clear();
-          for (int i = 0; i < templateArray.length; ++i) {
-            int equalPos = templateArray[i].indexOf('=');
+          for (int i = 0; i < templateArray.size(); ++i) {
+            
+            int equalPos = -1;
+            do {
+              equalPos = templateArray.get(i).indexOf('=', equalPos + 1);
+            } while (equalPos > 1 && templateArray.get(i).charAt(equalPos - 1) == ' ');
+
             if (equalPos == -1) {
-              positionalArgs.add(templateArray[i]);
+              positionalArgs.add(templateArray.get(i));
             } else {
-              namedArgs.put(templateArray[i].substring(0, equalPos), templateArray[i].substring(equalPos + 1));
+              namedArgs.put(templateArray.get(i).substring(0, equalPos), templateArray.get(i).substring(equalPos + 1));
             }
           }
           callback.onTemplate(positionalArgs, namedArgs);
@@ -138,10 +149,115 @@ public class WikiParser {
           callback.onText(rest.substring(5, end));
           rest = rest.substring(end + 6);
         } else {
-          throw new RuntimeException("barf!");
+          throw new RuntimeException("barf: " + rest);
         }
       }  // matcher.find()
     }
   }
+  
+  private static final Pattern openBracketOrPipe = Pattern.compile("($)|(\\[\\[)|(\\s*\\|\\s*)");
+  private static void contextSensitivePipeSplit(String template, final List<String> result) {
+    StringBuilder builder = new StringBuilder();
+    while (template.length() > 0) {
+      final Matcher matcher = openBracketOrPipe.matcher(template);
+      if (matcher.find()) {
+        // append to the match.
+        builder.append(template.substring(0, matcher.start()));
+        if (matcher.group(2) != null) {  // [[
+          // append to the close ]].
+          final int closeIndex = template.indexOf("]]", matcher.end());
+          builder.append(template.substring(matcher.start(), closeIndex + 2));
+          template = template.substring(closeIndex + 2);
+        } else if (matcher.group(3) != null) { // |
+          result.add(builder.toString());
+          builder = new StringBuilder();
+          template = template.substring(matcher.end());
+        } else {
+          template = template.substring(matcher.start());
+          assert template.length() == 0 : template;
+        }
+      } else {
+        assert false;
+      }
+    }
+    result.add(builder.toString());
+  }
+
+  // ------------------------------------------------------------------------
+
+  public static String simpleParse(final String wikiText) {
+    final StringBuilderCallback callback = new StringBuilderCallback();
+    parse(wikiText, callback);
+    return callback.builder.toString();
+  }
+  
+  static final class StringBuilderCallback implements WikiCallback {
+
+    final StringBuilder builder = new StringBuilder();
+    
+    @Override
+    public void onComment(String text) {
+    }
+
+    @Override
+    public void onFormatBold(boolean boldOn) {
+    }
+
+    @Override
+    public void onFormatItalic(boolean italicOn) {
+    }
+
+    @Override
+    public void onWikiLink(String[] args) {
+      builder.append(args[args.length - 1]);
+    }
+
+    @Override
+    public void onTemplate(List<String> positionalArgs,
+        Map<String, String> namedArgs) {
+      builder.append("{{").append(positionalArgs).append(namedArgs).append("}}");
+    }
+
+    @Override
+    public void onText(String text) {
+      builder.append(text);
+    }
+
+    @Override
+    public void onHeadingStart(int depth) {
+    }
+
+    @Override
+    public void onHeadingEnd(int depth) {
+    }
+
+    @Override
+    public void onNewLine() {
+    }
+
+    @Override
+    public void onNewParagraph() {
+    }
+
+    @Override
+    public void onListItemStart(String header, int[] section) {
+    }
+
+    @Override
+    public void onListItemEnd(String header, int[] section) {
+    }
+
+    @Override
+    public void onUnterminated(String start, String rest) {
+      throw new RuntimeException(start + rest);
+    }
+
+    @Override
+    public void onInvalidHeaderEnd(String rest) {
+      throw new RuntimeException(rest);
+    }
+    
+  }
+
 
 }
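
contextSensitivePipeSplit splits a template body on top-level pipes while copying anything between [[ and ]] through verbatim, so a pipe inside a wiki link no longer breaks an argument apart. A standalone copy of the method with a small driver showing the effect (illustrative only):

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class PipeSplitSketch {
  // Copy of the new helper, for illustration only.
  private static final Pattern openBracketOrPipe = Pattern.compile("($)|(\\[\\[)|(\\s*\\|\\s*)");

  static void contextSensitivePipeSplit(String template, final List<String> result) {
    StringBuilder builder = new StringBuilder();
    while (template.length() > 0) {
      final Matcher matcher = openBracketOrPipe.matcher(template);
      if (matcher.find()) {
        builder.append(template.substring(0, matcher.start()));
        if (matcher.group(2) != null) {          // "[[" -- copy through the matching "]]"
          final int closeIndex = template.indexOf("]]", matcher.end());
          builder.append(template.substring(matcher.start(), closeIndex + 2));
          template = template.substring(closeIndex + 2);
        } else if (matcher.group(3) != null) {   // top-level "|" -- finish this argument
          result.add(builder.toString());
          builder = new StringBuilder();
          template = template.substring(matcher.end());
        } else {                                 // "$" -- end of input
          template = template.substring(matcher.start());
        }
      } else {
        break;  // cannot happen: "$" always matches
      }
    }
    result.add(builder.toString());
  }

  public static void main(final String[] args) {
    final List<String> parts = new ArrayList<String>();
    contextSensitivePipeSplit("t|sv|[[house|hus]]|n", parts);
    System.out.println(parts);  // [t, sv, [[house|hus]], n] -- the pipe inside [[...]] survives
  }
}
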
diff --git a/src/com/hughes/android/dictionary/parser/WikiParserTest.java b/src/com/hughes/android/dictionary/parser/WikiParserTest.java
index 922b0bc973d293e42b46bff51ca989538c86783a..813b073992c2ae102057ad8cf824d3594a61603e 100644
@@ -18,12 +18,13 @@ public class WikiParserTest extends TestCase {
       "asdf\n" + 
       "# li" + "\n" +
       "# li2" + "\n" +
+      "# {{template_in_list}}" + "\n" +
       "## li2.2" + "\n" +
       "Hi again." + "\n" +
       "[[wikitext]]:[[wikitext]]" + "\n" +  // don't want this to trigger a list
       "here's [[some blah|some]] wikitext." + "\n" +
-      "here's a {{template|blah=2|blah2=3|" + "\n" +
-      "blah3=3}} and some more text." + "\n" +
+      "here's a {{template|this has an = sign|blah=2|blah2=3|" + "\n" +
+      "blah3=3,[[asdf]|[asdf asdf]|[asdf asdf asdf]],blah4=4}} and some more text." + "\n" +
       "== Header 2 ==" + "\n" +
 //      "==== Header 4 ====" + "\n" +
 //      "===== Header 5 =====" + "\n" +
@@ -38,12 +39,15 @@ public class WikiParserTest extends TestCase {
         "\n" +
         "\n" +
         " asdf\n" +
-        "# li\n" +
-        "# li2\n" +
-        "## li2.2\n" +
+        "LIST (#) li\n" +
+        "LIST (#) li2\n" +
+        "LIST (#) \n" +
+        "template:[template_in_list]{}\n" +
+        "\n" + 
+        "LIST (##) li2.2\n" +
         "\n" +
         " Hi again. [[wikitext]]:[[wikitext]] here's [[some]] wikitext. here's a \n" +
-        "template:[template]{blah=2, blah2=3, blah3=3}\n" +
+        "template:[template, this has an = sign]{blah=2, blah2=3, blah3=3,[[asdf]|[asdf asdf]|[asdf asdf asdf]],blah4=4}\n" +
         " and some more text.\n" +
         "HEADER   Header 2 \n" +
         "\n" +
@@ -116,7 +120,7 @@ public class WikiParserTest extends TestCase {
 
     @Override
     public void onListItemStart(String header, int[] section) {
-      builder.append("\n").append(header);
+      builder.append("\n").append("LIST (").append(header).append(")");
     }
 
     @Override
diff --git a/src/com/hughes/android/dictionary/parser/WikiWord.java b/src/com/hughes/android/dictionary/parser/WikiWord.java
index 0a1a32bf5e29c1ed52452d2435df0a405ec4e627..c677d13be37280f454b9e3b548ba285adda8534e 100644
@@ -2,25 +2,35 @@ package com.hughes.android.dictionary.parser;
 
 import java.util.ArrayList;
 import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
+import java.util.regex.Pattern;
+
+import com.hughes.android.dictionary.engine.DictionaryBuilder;
+import com.hughes.android.dictionary.engine.EntryData;
+import com.hughes.android.dictionary.engine.EntryTypeName;
+import com.hughes.android.dictionary.engine.IndexBuilder;
+import com.hughes.android.dictionary.engine.PairEntry;
+import com.hughes.android.dictionary.engine.PairEntry.Pair;
+import com.hughes.util.ListUtil;
 
 public class WikiWord {
   final int depth;
   
+  final String title;
   String language;
+
+  int index;
   
   final Map<String, StringBuilder> accentToPronunciation = new LinkedHashMap<String, StringBuilder>();
   StringBuilder currentPronunciation = null;
 
-  boolean isLang1;
-  boolean isLang2;
-  
   final List<PartOfSpeech> partsOfSpeech = new ArrayList<WikiWord.PartOfSpeech>();
   
-  final Map<String, List<String>> otherSections = new LinkedHashMap<String, List<String>>();
-  
-  public WikiWord(int depth) {
+  public WikiWord(final String title, int depth) {
+    this.title = title.intern();
     this.depth = depth;
   }
 
@@ -28,35 +38,294 @@ public class WikiWord {
     final int depth;
     final String name;
 
-    final List<Meaning> meaning = new ArrayList<WikiWord.Meaning>();
+    final List<Meaning> meanings = new ArrayList<WikiWord.Meaning>();
+    
+    final List<TranslationSense> translationSenses = new ArrayList<WikiWord.TranslationSense>();
+    
+    final List<FormOf> formOfs = new ArrayList<WikiWord.FormOf>();
     
-    final List<TranslationSection> translationSections = new ArrayList<WikiWord.TranslationSection>();
-        
-    final Map<String, String> otherSections = new LinkedHashMap<String, String>();
-
     public PartOfSpeech(final int depth, String name) {
       this.depth = depth;
-      this.name = name;
+      this.name = name.intern();
+    }
+
+    public Meaning newMeaning() {
+      final Meaning meaning = new Meaning();
+      meanings.add(meaning);
+      return meaning;
+    }
+
+    public Meaning lastMeaning() {
+      return meanings.isEmpty() ? newMeaning() : ListUtil.getLast(meanings);
     }
   }
   
-  static class TranslationSection {
+  static class TranslationSense {
     String sense;
-    List<List<String>> translations = new ArrayList<List<String>>();
+    List<List<Translation>> translations = new ArrayList<List<Translation>>();
     {
-      translations.add(new ArrayList<String>());
-      translations.add(new ArrayList<String>());
+      translations.add(new ArrayList<Translation>());
+      translations.add(new ArrayList<Translation>());
+    }
+  }
+  
+  static class Translation {
+    String language;
+    String text;
+    
+    public Translation(final String language, final String text) {
+      this.language = language;
+      this.text = text;
+    }
+
+    @Override
+    public String toString() {
+      return language + ": " + text;
+    }
+  }
+  
+  static class FormOf {
+    final String grammarForm;
+    final String target;
+    
+    public FormOf(final String grammarForm, final String token) {
+      this.grammarForm = grammarForm;
+      this.target = token;
     }
   }
   
   static class Meaning {
     String meaning;
-    Example example;
+    final List<Example> examples = new ArrayList<WikiWord.Example>();
+    
+    public Example newExample() {
+      final Example example = new Example();
+      this.examples.add(example);
+      return example;
+    }
+
+    public Example lastExample() {
+      return examples.isEmpty() ? newExample() : ListUtil.getLast(examples);
+    }
   }
   
   static class Example {
-    String example;
-    String exampleInEnglish;
+    String source;
+    final StringBuilder example = new StringBuilder();
+    final StringBuilder exampleInEnglish = new StringBuilder();
+  }
+  
+  // -------------------------------------------------------------------------
+  
+  void wikiWordToQuickDic(final DictionaryBuilder dictBuilder, final int enIndexBuilder) {
+    //System.out.println("\n" + title + ", " + language + ", pron=" + accentToPronunciation);
+     if (partsOfSpeech.isEmpty() && title.indexOf(":") == -1 && !language.equals("Translingual")) {
+       System.err.println("Word with no POS: " + title);
+     }
+     for (final WikiWord.PartOfSpeech partOfSpeech : partsOfSpeech) {
+       partOfSpeechToQuickDic(dictBuilder, enIndexBuilder, partOfSpeech);
+     }  // PartOfSpeech
+  }
+
+
+  static final Pattern templateName = Pattern.compile("\\{[^,]*,");
+  private void partOfSpeechToQuickDic(final DictionaryBuilder dictBuilder,
+      final int enIndexBuilder, final WikiWord.PartOfSpeech partOfSpeech) {
+    //System.out.println("  pos: " + partOfSpeech.name);
+         
+     for (final WikiWord.Meaning meaning : partOfSpeech.meanings) {
+       //System.out.println("    meaning: " + meaning.meaning);
+       for (final WikiWord.Example example : meaning.examples) {
+         if (example.example.length() > 0) {
+           //System.out.println("      example: " + example.example);
+         }
+         if (example.exampleInEnglish.length() > 0) {
+           //System.out.println("      exampleInEnglish: " + example.exampleInEnglish);
+         }
+       }
+     }
+     
+     
+     if (index != -1) {
+       final boolean formOfSwap = index != 0;
+       for (final FormOf formOf : partOfSpeech.formOfs) {
+         final Pair pair = new Pair(title + ": " + formOf.grammarForm + ": " + formOf.target, "", formOfSwap);
+         final PairEntry pairEntry = new PairEntry(new Pair[] {pair});
+         final EntryData entryData = new EntryData(dictBuilder.dictionary.pairEntries.size(), pairEntry);
+         dictBuilder.dictionary.pairEntries.add(pairEntry);
+  
+         // File under title token.
+         final Set<String> tokens = DictFileParser.tokenize(formOf.target, DictFileParser.NON_CHAR);
+         dictBuilder.indexBuilders.get(index).addEntryWithTokens(entryData, tokens, EntryTypeName.WIKTIONARY_FORM_OF);
+       }
+     }
+
+     
+     if (enIndexBuilder != -1 && index != -1 && enIndexBuilder != index) {
+       final String entryBase = title + " (" + partOfSpeech.name.toLowerCase() + ")";
+       final boolean swap = enIndexBuilder == 1;
+     
+       // Meanings.
+       for (final Meaning meaning : partOfSpeech.meanings) {
+         final List<Pair> pairs = new ArrayList<PairEntry.Pair>();
+         
+         final List<Set<String>> exampleTokens = new ArrayList<Set<String>>();
+         exampleTokens.add(new LinkedHashSet<String>());
+         exampleTokens.add(new LinkedHashSet<String>());
+         
+         if (meaning.meaning != null && meaning.meaning.length() > 0) {
+           final Pair meaningPair = new Pair(meaning.meaning, entryBase, swap);
+           pairs.add(meaningPair);
+         } else {
+           System.err.println("Empty meaning: " + title + ", " + language + ", " + partOfSpeech.name);
+         }
+           
+         // Examples
+         for (final Example example : meaning.examples) {
+           final int dashIndex = example.example.indexOf("—");
+           if (example.exampleInEnglish.length() == 0 && dashIndex != -1) {
+             System.out.println("Splitting example: title=" + title + ", "+ example.example);
+             example.exampleInEnglish.append(example.example.substring(dashIndex + 1).trim());
+             example.example.delete(dashIndex, example.example.length());
+           }
+           
+           if (example.example.length() > 0 && example.exampleInEnglish.length() > 0) {
+             final Pair pair = new Pair(example.exampleInEnglish.toString(), example.example.toString(), swap);
+             pairs.add(pair);
+             
+             for (int i = 0; i < 2; ++i) {
+               exampleTokens.get(i).addAll(DictFileParser.tokenize(pair.get(i), DictFileParser.NON_CHAR));
+             }
+           }
+         }
+
+         // Create EntryData with the PairEntry.
+         final PairEntry pairEntry = new PairEntry(pairs.toArray(new Pair[0]));
+         final EntryData entryData = new EntryData(dictBuilder.dictionary.pairEntries.size(), pairEntry);
+         dictBuilder.dictionary.pairEntries.add(pairEntry);
+
+         // File under title token.
+         final Set<String> titleTokens = DictFileParser.tokenize(title, DictFileParser.NON_CHAR);
+         dictBuilder.indexBuilders.get(index).addEntryWithTokens(entryData, titleTokens, titleTokens.size() == 1 ? EntryTypeName.WIKTIONARY_TITLE_ONE_WORD : EntryTypeName.WIKTIONARY_TITLE_MULTI_WORD);
+       
+         // File under the meaning tokens (English):
+         if (meaning.meaning != null) {
+           // If the meaning contains any templates, strip out the template name
+           // so we don't index it.
+           final String meaningToIndex = templateName.matcher(meaning.meaning).replaceAll("");
+           final Set<String> meaningTokens = DictFileParser.tokenize(meaningToIndex, DictFileParser.NON_CHAR);
+           dictBuilder.indexBuilders.get(enIndexBuilder).addEntryWithTokens(entryData, meaningTokens, meaningTokens.size() == 1 ? EntryTypeName.WIKTIONARY_MEANING_ONE_WORD : EntryTypeName.WIKTIONARY_MEANING_MULTI_WORD);
+         }
+         
+         // File under other tokens that we saw.
+         for (int i = 0; i < 2; ++i) {
+           dictBuilder.indexBuilders.get(i).addEntryWithTokens(entryData, exampleTokens.get(i), EntryTypeName.WIKTIONARY_EXAMPLE_OTHER_WORDS);
+         }         
+       
+         
+       }  // Meanings.
+       
+       
+     }
+     
+     translationSensesToQuickDic(dictBuilder, enIndexBuilder, partOfSpeech);
   }
 
+
+  private void translationSensesToQuickDic(final DictionaryBuilder dictBuilder,
+      final int enIndexBuilder, final WikiWord.PartOfSpeech partOfSpeech) {
+    if (!partOfSpeech.translationSenses.isEmpty()) {
+       if (!language.equals("English")) {
+         System.err.println("Translation sections not in English.");
+       }
+       
+       final String englishBase = title + " (" + partOfSpeech.name.toLowerCase() + "%s)";
+       
+       final StringBuilder englishPron = new StringBuilder();
+       for (final Map.Entry<String, StringBuilder> accentToPron : accentToPronunciation.entrySet()) {
+         englishPron.append("\n");
+         if (accentToPron.getKey().length() > 0) {
+           englishPron.append(accentToPron.getKey()).append(": ");
+         }
+         englishPron.append(accentToPron.getValue());
+       }
+       
+       for (final TranslationSense translationSense : partOfSpeech.translationSenses) {
+         //System.out.println("    sense: " + translationSense.sense);
+         if (translationSense.sense == null) {
+           //System.err.println("    null sense: " + title);
+         }
+         String englishSense = String.format(englishBase, translationSense.sense != null ? (": " + translationSense.sense) : "");
+         englishSense += englishPron.toString();
+         
+         final StringBuilder[] sideBuilders = new StringBuilder[2];
+         final List<Map<EntryTypeName, List<String>>> sideTokens = new ArrayList<Map<EntryTypeName,List<String>>>();
+         for (int i = 0; i < 2; ++i) {
+           sideBuilders[i] = new StringBuilder();
+           sideTokens.add(new LinkedHashMap<EntryTypeName, List<String>>());
+         }
+         
+         if (enIndexBuilder != -1) {
+           sideBuilders[enIndexBuilder].append(englishSense);
+           addTokens(title, sideTokens.get(enIndexBuilder), EntryTypeName.WIKTIONARY_TITLE_ONE_WORD);
+         }
+         
+         // Get the entries from the translation section.
+         for (int i = 0; i < 2; ++i) {
+           //System.out.println("      lang: " + i);
+           for (final Translation translation : translationSense.translations.get(i)) {
+             //System.out.println("        translation: " + translation);
+             sideBuilders[i].append(sideBuilders[i].length() > 0 ? "\n" : "");
+             if (translationSense.translations.get(i).size() > 1) {
+               sideBuilders[i].append(translation.language).append(": ");
+             }
+             sideBuilders[i].append(translation.text);
+             
+             // TODO: Don't index {m}, {f}
+             // TODO: Don't even show: (1), (1-2), etc.
+             addTokens(translation.text, sideTokens.get(i), EntryTypeName.WIKTIONARY_TRANSLATION_ONE_WORD);
+           }
+         }
+
+         // Construct the Translations-based QuickDic entry for this TranslationSense.
+         if (sideBuilders[0].length() > 0 && sideBuilders[1].length() > 0) {
+           final Pair pair = new Pair(sideBuilders[0].toString(), sideBuilders[1].toString());
+           final PairEntry pairEntry = new PairEntry(new Pair[] { pair });
+           final EntryData entryData = new EntryData(dictBuilder.dictionary.pairEntries.size(), pairEntry);
+           dictBuilder.dictionary.pairEntries.add(pairEntry);
+           
+           // Add the EntryData to the indices under the correct tokens.
+           for (int i = 0; i < 2; ++i) {
+             final IndexBuilder indexBuilder = dictBuilder.indexBuilders.get(i);
+             for (final Map.Entry<EntryTypeName, List<String>> entry : sideTokens.get(i).entrySet()) {
+               for (final String token : entry.getValue()) {
+                 final List<EntryData> entries = indexBuilder.getOrCreateEntries(token, entry.getKey());
+                 entries.add(entryData);
+               }
+             }
+
+           }             
+           
+         }
+       }  // Senses
+     }  // Translations
+  }
+
+  
+  static void addTokens(final String text, final Map<EntryTypeName, List<String>> map,
+      EntryTypeName entryTypeName) {
+    final Set<String> tokens = DictFileParser.tokenize(text, DictFileParser.NON_CHAR);
+    if (tokens.size() > 1 && entryTypeName == EntryTypeName.WIKTIONARY_TITLE_ONE_WORD) {
+      entryTypeName = EntryTypeName.WIKTIONARY_TITLE_MULTI_WORD;
+    }
+    List<String> tokenList = map.get(entryTypeName);
+    if (tokenList == null) {
+      tokenList = new ArrayList<String>();
+      map.put(entryTypeName, tokenList);
+    }
+    tokenList.addAll(tokens);
+  }
+
+
+
 }
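
Finally, addTokens above buckets the tokens of a phrase under an EntryTypeName, demoting a multi-word title from the ONE_WORD type to the MULTI_WORD type before filing. A simplified, self-contained sketch of that bucketing, with String keys in place of EntryTypeName and an assumed tokenizer pattern:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.regex.Pattern;

public class AddTokensSketch {
  // Assumed stand-in for DictFileParser.NON_CHAR (not shown in this diff).
  static final Pattern NON_CHAR = Pattern.compile("[^\\p{L}]+");

  static Set<String> tokenize(final String text) {
    final Set<String> result = new LinkedHashSet<String>(Arrays.asList(NON_CHAR.split(text)));
    result.remove("");
    return result;
  }

  // Same shape as WikiWord.addTokens, with Strings in place of EntryTypeName.
  static void addTokens(final String text, final Map<String, List<String>> map, String entryTypeName) {
    final Set<String> tokens = tokenize(text);
    if (tokens.size() > 1 && entryTypeName.equals("TITLE_ONE_WORD")) {
      entryTypeName = "TITLE_MULTI_WORD";  // multi-word phrases move to the multi-word bucket
    }
    List<String> tokenList = map.get(entryTypeName);
    if (tokenList == null) {
      tokenList = new ArrayList<String>();
      map.put(entryTypeName, tokenList);
    }
    tokenList.addAll(tokens);
  }

  public static void main(final String[] args) {
    final Map<String, List<String>> map = new LinkedHashMap<String, List<String>>();
    addTokens("give up", map, "TITLE_ONE_WORD");
    System.out.println(map);  // {TITLE_MULTI_WORD=[give, up]}
  }
}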