1 // Copyright 2011 Google Inc. All Rights Reserved.
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
7 // http://www.apache.org/licenses/LICENSE-2.0
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
15 package com.hughes.android.dictionary.parser.enwiktionary;
17 import java.io.BufferedInputStream;
18 import java.io.DataInputStream;
19 import java.io.EOFException;
21 import java.io.FileInputStream;
22 import java.io.IOException;
23 import java.util.ArrayList;
24 import java.util.Arrays;
25 import java.util.Collection;
26 import java.util.LinkedHashSet;
27 import java.util.List;
30 import java.util.logging.Level;
31 import java.util.logging.Logger;
32 import java.util.regex.Pattern;
34 import com.hughes.android.dictionary.engine.EntryTypeName;
35 import com.hughes.android.dictionary.engine.IndexBuilder;
36 import com.hughes.android.dictionary.engine.IndexedEntry;
37 import com.hughes.android.dictionary.engine.PairEntry;
38 import com.hughes.android.dictionary.engine.PairEntry.Pair;
39 import com.hughes.android.dictionary.parser.WikiTokenizer;
41 public class EnWiktionaryXmlParser {
43 static final Logger LOG = Logger.getLogger(EnWiktionaryXmlParser.class.getName());
45 // TODO: process {{ttbc}} lines
47 static final Pattern partOfSpeechHeader = Pattern.compile(
48 "Noun|Verb|Adjective|Adverb|Pronoun|Conjunction|Interjection|" +
49 "Preposition|Proper noun|Article|Prepositional phrase|Acronym|" +
50 "Abbreviation|Initialism|Contraction|Prefix|Suffix|Symbol|Letter|" +
51 "Ligature|Idiom|Phrase|\\{\\{acronym\\}\\}|\\{\\{initialism\\}\\}|" +
52 "\\{\\{abbreviation\\}\\}|" +
53 // These are @deprecated:
54 "Noun form|Verb form|Adjective form|Nominal phrase|Noun phrase|" +
55 "Verb phrase|Transitive verb|Intransitive verb|Reflexive verb|" +
56 // These are extras I found:
57 "Determiner|Numeral|Number|Cardinal number|Ordinal number|Proverb|" +
58 "Particle|Interjection|Pronominal adverb" +
59 "Han character|Hanzi|Hanja|Kanji|Katakana character|Syllable");
61 final IndexBuilder enIndexBuilder;
62 final IndexBuilder foreignIndexBuilder;
63 final Pattern langPattern;
64 final Pattern langCodePattern;
67 // State used while parsing.
70 ENGLISH_DEF_OF_FOREIGN,
// Builds a parser that feeds two indices: one for English and one for the
// foreign language selected by langPattern (heading names) and
// langCodePattern (language codes inside templates).
// NOTE(review): the 'swap' parameter's field assignment is not visible in
// this excerpt; it is presumably stored and later passed to Pair (see the
// 'swap' uses in doTranslationLine/doForeignListSection) — confirm.
77 public EnWiktionaryXmlParser(final IndexBuilder enIndexBuilder, final IndexBuilder otherIndexBuilder, final Pattern langPattern, final Pattern langCodePattern, final boolean swap) {
78 this.enIndexBuilder = enIndexBuilder;
79 this.foreignIndexBuilder = otherIndexBuilder;
80 this.langPattern = langPattern;
81 this.langCodePattern = langCodePattern;
// Reads a pre-split binary page stream: per page, a title (readUTF), a
// heading (readUTF), then a length-prefixed UTF-8 text blob, and hands each
// page to parseSection.  Stops at EOF (logged, not an error) or once
// pageLimit pages have been read (pageLimit < 0 means unlimited).
// NOTE(review): the enclosing loop, 'title'/'pageCount' updates and the
// stream close are elided from this excerpt — the EOFException catch implies
// a try around readUTF, and pageCount is presumably incremented per page.
86 public void parse(final File file, final int pageLimit) throws IOException {
88 final DataInputStream dis = new DataInputStream(new BufferedInputStream(new FileInputStream(file)));
90 if (pageLimit >= 0 && pageCount >= pageLimit) {
95 title = dis.readUTF();
96 } catch (EOFException e) {
97 LOG.log(Level.INFO, "EOF reading split.");
101 final String heading = dis.readUTF();
102 final int bytesLength = dis.readInt();
103 final byte[] bytes = new byte[bytesLength];
104 dis.readFully(bytes);
// Decodes the blob explicitly as UTF-8 (charset name "UTF8").
105 final String text = new String(bytes, "UTF8");
107 parseSection(heading, text);
// Progress logging every 1000 pages.
110 if (pageCount % 1000 == 0) {
111 LOG.info("pageCount=" + pageCount);
// Dispatches one page section: skips non-dictionary namespaces
// (Wiktionary:, Template:, Appendix:, ...), then strips '=' markup from the
// heading and routes "English" sections to the English handler and headings
// matching langPattern to doForeignWord.
// NOTE(review): the early return for skipped namespaces and the
// doEnglishWord call after the "English" test are elided from this excerpt.
116 private void parseSection(String heading, final String text) {
117 if (title.startsWith("Wiktionary:") ||
118 title.startsWith("Template:") ||
119 title.startsWith("Appendix:") ||
120 title.startsWith("Category:") ||
121 title.startsWith("Index:") ||
122 title.startsWith("MediaWiki:") ||
123 title.startsWith("TransWiki:") ||
124 title.startsWith("Citations:") ||
125 title.startsWith("Concordance:") ||
126 title.startsWith("Help:")) {
// Heading arrives with wiki '=' markers (e.g. "==English=="); strip them.
130 heading = heading.replaceAll("=", "").trim();
131 if (heading.equals("English")) {
133 } else if (langPattern.matcher(heading).find()){
134 doForeignWord(heading, text);
139 // -------------------------------------------------------------------------
// Walks an English-language section token by token, tracking the current
// part-of-speech heading (pos/posDepth) and delegating "Translations"
// subsections to doTranslations.  Pronunciation handling is stubbed out.
// NOTE(review): 'pos' and 'posDepth' appear to be locals declared on elided
// lines; several else-branches and closing braces are also elided.
141 private void doEnglishWord(String text) {
146 final WikiTokenizer wikiTokenizer = new WikiTokenizer(text);
147 while (wikiTokenizer.nextToken() != null) {
149 if (wikiTokenizer.isHeading()) {
150 final String headerName = wikiTokenizer.headingWikiText();
// A heading at or above the current POS depth closes the POS section.
152 if (wikiTokenizer.headingDepth() <= posDepth) {
157 if (partOfSpeechHeader.matcher(headerName).matches()) {
158 posDepth = wikiTokenizer.headingDepth();
159 pos = wikiTokenizer.headingWikiText();
160 // TODO: if we're inside the POS section, we should handle the first title line...
162 } else if (headerName.equals("Translations")) {
164 LOG.info("Translations without POS (but using anyway): " + title);
166 doTranslations(wikiTokenizer, pos);
167 } else if (headerName.equals("Pronunciation")) {
168 //doPronunciation(wikiLineReader);
170 } else if (wikiTokenizer.isFunction()) {
171 final String name = wikiTokenizer.functionName();
172 if (name.equals("head") && pos == null) {
173 LOG.warning("{{head}} without POS: " + title);
// Shared callback that appends rendered wiki text to a StringBuilder while
// indexing tokens; reset(...) before each use (see doTranslationLine et al.).
179 final AppendAndIndexWikiCallback appendAndIndexWikiCallback = new AppendAndIndexWikiCallback(this);
// NOTE(review): this putAll appears outside any visible method — presumably
// an instance-initializer block elided from this excerpt; confirm.
181 appendAndIndexWikiCallback.functionCallbacks.putAll(FunctionCallbacksDefault.DEFAULT);
// Parses a "Translations" subsection.  {{trans-top}} opens a sense block
// (its first positional arg is the gloss); list items are "Language: ..."
// lines whose language part is matched against langPattern to decide
// whether to emit the line (and whether to prefix the language name).
// NOTE(review): 'sense' is assigned but its declaration is elided; several
// branches (e.g. the one-level/two-level appendLang else-arms) are elided.
184 private void doTranslations(final WikiTokenizer wikiTokenizer, final String pos) {
// Debugging hook for a specific page.
185 if (title.equals("absolutely")) {
186 //System.out.println();
189 String topLevelLang = null;
191 boolean done = false;
192 while (wikiTokenizer.nextToken() != null) {
// A new heading ends the Translations section; push the token back.
193 if (wikiTokenizer.isHeading()) {
194 wikiTokenizer.returnToLineStart();
201 // Check whether we care about this line:
203 if (wikiTokenizer.isFunction()) {
204 final String functionName = wikiTokenizer.functionName();
205 final List<String> positionArgs = wikiTokenizer.functionPositionArgs();
207 if (functionName.equals("trans-top")) {
209 if (wikiTokenizer.functionPositionArgs().size() >= 1) {
210 sense = positionArgs.get(0);
211 // TODO: could emphasize words in [[brackets]] inside sense.
212 sense = WikiTokenizer.toPlainText(sense);
213 //LOG.info("Sense: " + sense);
// Structural templates with no content of their own are ignored.
215 } else if (functionName.equals("trans-bottom")) {
217 } else if (functionName.equals("trans-mid")) {
218 } else if (functionName.equals("trans-see")) {
219 // TODO: would also be nice...
220 } else if (functionName.startsWith("picdic")) {
221 } else if (functionName.startsWith("checktrans")) {
// {{ttbc}} ("translations to be checked") lines are skipped wholesale.
223 } else if (functionName.startsWith("ttbc")) {
224 wikiTokenizer.nextLine();
225 // TODO: would be great to handle ttbc
226 // TODO: Check this: done = true;
228 LOG.warning("Unexpected translation wikifunction: " + wikiTokenizer.token() + ", title=" + title);
230 } else if (wikiTokenizer.isListItem()) {
231 final String line = wikiTokenizer.listItemWikiText();
232 // This line could produce an output...
// Debugging hook for a specific translation line.
234 if (line.contains("ich hoan dich gear")) {
235 //System.out.println();
238 // First strip the language and check whether it matches.
239 // And hold onto it for sub-lines.
240 final int colonIndex = line.indexOf(":");
241 if (colonIndex == -1) {
245 final String lang = trim(WikiTokenizer.toPlainText(line.substring(0, colonIndex)));
// appendLang: whether to prefix "(lang)" on the output (i.e. the line's
// language is not exactly the dictionary's target language).
246 final boolean appendLang;
// Prefix length 1 => top-level list item; remember its language for
// any indented sub-lines (dialects etc.).
247 if (wikiTokenizer.listItemPrefix().length() == 1) {
249 final boolean thisFind = langPattern.matcher(lang).find();
253 appendLang = !langPattern.matcher(lang).matches();
254 } else if (topLevelLang == null) {
257 // Two-level -- the only way we won't append is if this second level matches exactly.
258 if (!langPattern.matcher(lang).matches() && !langPattern.matcher(topLevelLang).find()) {
261 appendLang = !langPattern.matcher(lang).matches();
264 String rest = line.substring(colonIndex + 1).trim();
265 if (rest.length() > 0) {
266 doTranslationLine(line, appendLang ? lang : null, pos, sense, rest);
269 } else if (wikiTokenizer.remainderStartsWith("''See''")) {
270 wikiTokenizer.nextLine();
271 LOG.fine("Skipping See line: " + wikiTokenizer.token());
272 } else if (wikiTokenizer.isWikiLink()) {
273 final String wikiLink = wikiTokenizer.wikiLinkText();
// Interwiki/category links are silently ignored; anything else is logged.
274 if (wikiLink.contains(":") && wikiLink.contains(title)) {
275 } else if (wikiLink.contains("Category:")) {
277 LOG.warning("Unexpected wikiLink: " + wikiTokenizer.token() + ", title=" + title);
279 } else if (wikiTokenizer.isNewline() || wikiTokenizer.isMarkup() || wikiTokenizer.isComment()) {
281 final String token = wikiTokenizer.token();
// "----" is the language-section divider.
282 if (token.equals("----")) {
284 LOG.warning("Unexpected translation token: " + wikiTokenizer.token() + ", title=" + title);
// Emits one translation pair: renders the foreign half from 'rest' (indexing
// its tokens), builds the English half from the page title plus optional
// "(sense)" and "(pos)" suffixes, and files the Pair, warning on duplicates.
// NOTE(review): pos.toLowerCase() uses the default locale — consider
// toLowerCase(Locale.ENGLISH) to avoid the Turkish-I problem; confirm intent.
291 private void doTranslationLine(final String line, final String lang, final String pos, final String sense, final String rest) {
292 state = State.TRANSLATION_LINE;
293 // Good chance we'll actually file this one...
294 final PairEntry pairEntry = new PairEntry();
295 final IndexedEntry indexedEntry = new IndexedEntry(pairEntry);
297 final StringBuilder foreignText = new StringBuilder();
298 appendAndIndexWikiCallback.reset(foreignText, indexedEntry);
299 appendAndIndexWikiCallback.dispatch(rest, foreignIndexBuilder, EntryTypeName.WIKTIONARY_TRANSLATION_OTHER_TEXT);
// Nothing rendered => warn (the early return, if any, is elided here).
301 if (foreignText.length() == 0) {
302 LOG.warning("Empty foreignText: " + line);
// When lang is non-null the caller wants the language shown, e.g. "(French) ...".
307 foreignText.insert(0, String.format("(%s) ", lang));
310 StringBuilder englishText = new StringBuilder();
312 englishText.append(title);
314 englishText.append(" (").append(sense).append(")");
315 enIndexBuilder.addEntryWithString(indexedEntry, sense, EntryTypeName.WIKTIONARY_TRANSLATION_SENSE);
318 englishText.append(" (").append(pos.toLowerCase()).append(")");
320 enIndexBuilder.addEntryWithString(indexedEntry, title, EntryTypeName.WIKTIONARY_TITLE_MULTI);
322 final Pair pair = new Pair(trim(englishText.toString()), trim(foreignText.toString()), swap);
323 pairEntry.pairs.add(pair);
// pairsAdded de-duplicates on the pair's string form across the whole run.
324 if (!pairsAdded.add(pair.toString())) {
325 LOG.warning("Duplicate pair: " + pair.toString());
330 Set<String> pairsAdded = new LinkedHashSet<String>();
332 // -------------------------------------------------------------------------
// Walks a foreign-language section: warns if it contains a Translations
// heading (those belong under English), skips Pronunciation, and hands each
// part-of-speech subsection to doForeignPartOfSpeech.
334 private void doForeignWord(final String lang, final String text) {
335 final WikiTokenizer wikiTokenizer = new WikiTokenizer(text);
336 while (wikiTokenizer.nextToken() != null) {
337 if (wikiTokenizer.isHeading()) {
338 final String headingName = wikiTokenizer.headingWikiText();
339 if (headingName.equals("Translations")) {
340 LOG.warning("Translations not in English section: " + title);
341 } else if (headingName.equals("Pronunciation")) {
342 //doPronunciation(wikiLineReader);
343 } else if (partOfSpeechHeader.matcher(headingName).matches()) {
344 doForeignPartOfSpeech(lang, headingName, wikiTokenizer.headingDepth(), wikiTokenizer);
// One definition list entry: the '#' line itself (firstPrefix/firstLine)
// plus any more-deeply-nested continuation lines (examples, quotations)
// collected in parallel lists nextPrefixes/nextLines.
351 static final class ListSection {
352 final String firstPrefix;
353 final String firstLine;
354 final List<String> nextPrefixes = new ArrayList<String>();
355 final List<String> nextLines = new ArrayList<String>();
357 public ListSection(String firstPrefix, String firstLine) {
358 this.firstPrefix = firstPrefix;
359 this.firstLine = firstLine;
// Debug-friendly rendering; note nextLines are intentionally omitted.
363 public String toString() {
364 return firstPrefix + firstLine + "{ " + nextPrefixes + "}";
369 int foreignCount = 0;
370 final Collection<String> wordForms = new ArrayList<String>();
371 boolean titleAppended = false;
// Processes one foreign POS section: accumulates the headword line(s) into
// foreignBuilder (templates rendered via appendAndIndexWikiCallback), groups
// the definition list items into ListSections, then emits each section via
// doForeignListSection with the built "foreign" left-hand text.
// NOTE(review): several else-branches, brace closures and blank lines are
// elided from this excerpt; read the original file before restructuring.
373 private void doForeignPartOfSpeech(final String lang, String posHeading, final int posDepth, WikiTokenizer wikiTokenizer) {
// Periodic progress logging.
374 if (++foreignCount % 1000 == 0) {
375 LOG.info("***" + lang + ", " + title + ", pos=" + posHeading + ", foreignCount=" + foreignCount);
// Debugging hook for a specific page.
377 if (title.equals("6")) {
378 System.out.println();
381 final StringBuilder foreignBuilder = new StringBuilder();
382 final List<ListSection> listSections = new ArrayList<ListSection>();
384 appendAndIndexWikiCallback.reset(foreignBuilder, null);
385 this.state = State.ENGLISH_DEF_OF_FOREIGN; // TODO: this is wrong, need new category....
386 titleAppended = false;
391 ListSection lastListSection = null;
393 int currentHeadingDepth = posDepth;
394 while (wikiTokenizer.nextToken() != null) {
395 if (wikiTokenizer.isHeading()) {
396 currentHeadingDepth = wikiTokenizer.headingDepth();
// A heading at or above our depth ends this POS section; push it back.
398 if (currentHeadingDepth <= posDepth) {
399 wikiTokenizer.returnToLineStart();
// Deeper sub-headings (conjugation tables etc.) are currently skipped.
404 if (currentHeadingDepth > posDepth) {
405 // TODO: deal with other neat info sections
409 if (wikiTokenizer.isFunction()) {
410 final String name = wikiTokenizer.functionName();
411 final List<String> args = wikiTokenizer.functionPositionArgs();
412 final Map<String,String> namedArgs = wikiTokenizer.functionNamedArgs();
413 // First line is generally a repeat of the title with some extra information.
414 // We need to build up the left side (foreign text, tokens) separately from the
415 // right side (English). The left-side may get paired with multiple right sides.
416 // The left side should get filed under every form of the word in question (singular, plural).
418 // For verbs, the conjugation comes later on in a deeper section.
419 // Ideally, we'd want to file every English entry with the verb
420 // under every verb form coming from the conjugation.
421 // Ie. under "fa": see: "make :: fare" and "do :: fare"
422 // But then where should we put the conjugation table?
423 // I think just under fare. But then we need a way to link to the entry (actually the row, since entries doesn't show up!)
424 // for the conjugation table from "fa".
425 // Would like to be able to link to a lang#token.
427 appendAndIndexWikiCallback.onFunction(wikiTokenizer, name, args, namedArgs);
429 } else if (wikiTokenizer.isListItem()) {
430 final String prefix = wikiTokenizer.listItemPrefix();
// A strictly longer prefix extending the previous one is a sub-line
// (example/quotation) of the current ListSection; otherwise start a new one.
431 if (lastListSection != null &&
432 prefix.startsWith(lastListSection.firstPrefix) &&
433 prefix.length() > lastListSection.firstPrefix.length()) {
434 lastListSection.nextPrefixes.add(prefix);
435 lastListSection.nextLines.add(wikiTokenizer.listItemWikiText());
437 lastListSection = new ListSection(prefix, wikiTokenizer.listItemWikiText());
438 listSections.add(lastListSection);
440 } else if (lastListSection != null) {
441 // Don't append anything after the lists, because there's crap.
442 } else if (wikiTokenizer.isWikiLink()) {
444 foreignBuilder.append(wikiTokenizer.wikiLinkText());
446 } else if (wikiTokenizer.isPlainText()) {
448 foreignBuilder.append(wikiTokenizer.token());
450 } else if (wikiTokenizer.isMarkup() || wikiTokenizer.isNewline() || wikiTokenizer.isComment()) {
453 LOG.warning("Unexpected token: " + wikiTokenizer.token());
458 // Here's where we exit.
459 // Should we make an entry even if there are no foreign list items?
460 String foreign = foreignBuilder.toString().trim();
// Ensure the headword text starts with the page title (case-insensitive).
461 if (!titleAppended && !foreign.toLowerCase().startsWith(title.toLowerCase())) {
462 foreign = String.format("%s %s", title, foreign);
// For sub-languages/dialects, prefix the language name.
464 if (!langPattern.matcher(lang).matches()) {
465 foreign = String.format("(%s) %s", lang, foreign);
467 for (final ListSection listSection : listSections) {
468 doForeignListSection(foreign, title, wordForms, listSection);
474 // Might only want to remove "lang" if it's equal to "zh", for example.
475 static final Set<String> USELESS_WIKI_ARGS = new LinkedHashSet<String>(
484 public boolean entryIsFormOfSomething = false;
// Turns one ListSection into PairEntry pairs: the '#' line becomes the
// English definition paired with the prepared foreign text, and the nested
// '#:'/'#::'/'#*' lines become example/quotation pairs, splitting each on an
// em-dash or " - " into foreign/English halves where possible.
// NOTE(review): the "pair.lang1 != \"--\"" tests below compare Strings by
// reference, not equals(); they only work (if at all) because "--" literals
// are interned together with formatAndIndexExampleString's return. Also, the
// conditions on original lines 536 and 543 test pair.lang1 twice — the
// second operand almost certainly should be pair.lang2. Both deserve fixing
// in the full source: !"--".equals(pair.lang1) || !"--".equals(pair.lang2).
486 private void doForeignListSection(final String foreignText, String title, final Collection<String> forms, final ListSection listSection) {
487 state = State.ENGLISH_DEF_OF_FOREIGN;
488 final String prefix = listSection.firstPrefix;
// Only top-level '#' definitions are handled; deeper firstPrefixes are bailed on.
489 if (prefix.length() > 1) {
490 // Could just get looser and say that any prefix longer than first is a sublist.
491 LOG.warning("Prefix too long: " + listSection);
495 final PairEntry pairEntry = new PairEntry();
496 final IndexedEntry indexedEntry = new IndexedEntry(pairEntry);
498 entryIsFormOfSomething = false;
499 final StringBuilder englishBuilder = new StringBuilder();
500 final String mainLine = listSection.firstLine;
501 appendAndIndexWikiCallback.reset(englishBuilder, indexedEntry);
502 appendAndIndexWikiCallback.dispatch(mainLine, enIndexBuilder, EntryTypeName.WIKTIONARY_ENGLISH_DEF);
504 final String english = trim(englishBuilder.toString());
505 if (english.length() > 0) {
506 final Pair pair = new Pair(english, trim(foreignText), this.swap);
507 pairEntry.pairs.add(pair);
// File the entry under the title and under every collected inflected form.
508 foreignIndexBuilder.addEntryWithString(indexedEntry, title, entryIsFormOfSomething ? EntryTypeName.WIKTIONARY_IS_FORM_OF_SOMETHING_ELSE : EntryTypeName.WIKTIONARY_TITLE_MULTI);
509 for (final String form : forms) {
510 foreignIndexBuilder.addEntryWithString(indexedEntry, form, EntryTypeName.WIKTIONARY_INFLECTED_FORM_MULTI);
// Now walk the nested example/quotation lines.
515 String lastForeign = null;
516 for (int i = 0; i < listSection.nextPrefixes.size(); ++i) {
517 final String nextPrefix = listSection.nextPrefixes.get(i);
518 final String nextLine = listSection.nextLines.get(i);
520 // TODO: This splitting is not sensitive to wiki code.
// Try em-dash first, then " - " (mdashLen set on elided lines).
521 int dash = nextLine.indexOf("—");
524 dash = nextLine.indexOf("—");
528 dash = nextLine.indexOf(" - ");
// '#:'-style example with a dash: split into foreign / English halves.
532 if ((nextPrefix.equals("#:") || nextPrefix.equals("##:")) && dash != -1) {
533 final String foreignEx = nextLine.substring(0, dash);
534 final String englishEx = nextLine.substring(dash + mdashLen);
535 final Pair pair = new Pair(formatAndIndexExampleString(englishEx, enIndexBuilder, indexedEntry), formatAndIndexExampleString(foreignEx, foreignIndexBuilder, indexedEntry), swap);
// NOTE(review): reference comparison and duplicated lang1 — see header note.
536 if (pair.lang1 != "--" && pair.lang1 != "--") {
537 pairEntry.pairs.add(pair);
// Dashless example: keep it as foreign-only, remember it for a '#::' follow-up.
540 } else if (nextPrefix.equals("#:") || nextPrefix.equals("##:")){
541 final Pair pair = new Pair("--", formatAndIndexExampleString(nextLine, null, indexedEntry), swap);
542 lastForeign = nextLine;
// NOTE(review): reference comparison and duplicated lang1 — see header note.
543 if (pair.lang1 != "--" && pair.lang1 != "--") {
544 pairEntry.pairs.add(pair);
// '#::' / '#**': English translation of the preceding foreign example —
// replace the foreign-only pair just added with a combined one.
546 } else if (nextPrefix.equals("#::") || nextPrefix.equals("#**")) {
547 if (lastForeign != null && pairEntry.pairs.size() > 0) {
548 pairEntry.pairs.remove(pairEntry.pairs.size() - 1);
549 final Pair pair = new Pair(formatAndIndexExampleString(nextLine, enIndexBuilder, indexedEntry), formatAndIndexExampleString(lastForeign, foreignIndexBuilder, indexedEntry), swap);
550 if (pair.lang1 != "--" || pair.lang2 != "--") {
551 pairEntry.pairs.add(pair);
555 LOG.warning("TODO: English example with no foreign: " + title + ", " + nextLine);
556 final Pair pair = new Pair("--", formatAndIndexExampleString(nextLine, null, indexedEntry), swap);
557 if (pair.lang1 != "--" || pair.lang2 != "--") {
558 pairEntry.pairs.add(pair);
// '#*': quotations — kept foreign-only, not indexed.
561 } else if (nextPrefix.equals("#*")) {
562 // Can't really index these.
563 final Pair pair = new Pair("--", formatAndIndexExampleString(nextLine, null, indexedEntry), swap);
564 lastForeign = nextLine;
565 if (pair.lang1 != "--" || pair.lang2 != "--") {
566 pairEntry.pairs.add(pair);
// Catch-all ('|| true'): any other nested prefix becomes an unindexed line.
568 } else if (nextPrefix.equals("#::*") || nextPrefix.equals("##") || nextPrefix.equals("#*:") || nextPrefix.equals("#:*") || true) {
569 final Pair pair = new Pair("--", formatAndIndexExampleString(nextLine, null, indexedEntry), swap);
570 if (pair.lang1 != "--" || pair.lang2 != "--") {
571 pairEntry.pairs.add(pair);
// Renders an example string through the shared callback (indexing every
// token as WIKTIONARY_EXAMPLE when indexBuilder is non-null) and returns the
// trimmed result, or "--" if nothing rendered.  AssertionErrors from the
// dispatcher are tolerated because the caller's dash-splitting can produce
// malformed wiki fragments (the try opening and catch body are elided here).
579 private String formatAndIndexExampleString(final String example, final IndexBuilder indexBuilder, final IndexedEntry indexedEntry) {
581 // if (wikiTokenizer.token().equals("'''")) {
582 // insideTripleQuotes = !insideTripleQuotes;
584 final StringBuilder builder = new StringBuilder();
585 appendAndIndexWikiCallback.reset(builder, indexedEntry);
586 appendAndIndexWikiCallback.entryTypeName = EntryTypeName.WIKTIONARY_EXAMPLE;
587 appendAndIndexWikiCallback.entryTypeNameSticks = true;
589 // TODO: this is a hack needed because we don't safely split on the dash.
590 appendAndIndexWikiCallback.dispatch(example, indexBuilder, EntryTypeName.WIKTIONARY_EXAMPLE);
591 } catch (AssertionError e) {
594 final String result = trim(builder.toString());
// "--" is the caller-visible sentinel for "no usable text".
595 return result.length() > 0 ? result : "--";
// Expands the Italian first-conjugation (-are) verb template: given the stem
// (args[0]) and auxiliary verb (args[1]), fills in every regular conjugated
// form as a named arg unless the template already supplied an override
// (putIfMissing keeps existing non-empty values), then delegates to itConj.
599 private void itConjAre(List<String> args, Map<String, String> namedArgs) {
600 final String base = args.get(0);
601 final String aux = args.get(1);
// Infinitive, auxiliary, gerund, participles.
603 putIfMissing(namedArgs, "inf", base + "are");
604 putIfMissing(namedArgs, "aux", aux);
605 putIfMissing(namedArgs, "ger", base + "ando");
606 putIfMissing(namedArgs, "presp", base + "ante");
607 putIfMissing(namedArgs, "pastp", base + "ato");
// Present indicative.
609 putIfMissing(namedArgs, "pres1s", base + "o");
610 putIfMissing(namedArgs, "pres2s", base + "i");
611 putIfMissing(namedArgs, "pres3s", base + "a");
612 putIfMissing(namedArgs, "pres1p", base + "iamo");
613 putIfMissing(namedArgs, "pres2p", base + "ate");
614 putIfMissing(namedArgs, "pres3p", base + "ano");
// Imperfect.
616 putIfMissing(namedArgs, "imperf1s", base + "avo");
617 putIfMissing(namedArgs, "imperf2s", base + "avi");
618 putIfMissing(namedArgs, "imperf3s", base + "ava");
619 putIfMissing(namedArgs, "imperf1p", base + "avamo");
620 putIfMissing(namedArgs, "imperf2p", base + "avate");
621 putIfMissing(namedArgs, "imperf3p", base + "avano");
// Past historic (passato remoto).
623 putIfMissing(namedArgs, "prem1s", base + "ai");
624 putIfMissing(namedArgs, "prem2s", base + "asti");
625 putIfMissing(namedArgs, "prem3s", base + "ò");
626 putIfMissing(namedArgs, "prem1p", base + "ammo");
627 putIfMissing(namedArgs, "prem2p", base + "aste");
628 putIfMissing(namedArgs, "prem3p", base + "arono");
// Future.
630 putIfMissing(namedArgs, "fut1s", base + "erò");
631 putIfMissing(namedArgs, "fut2s", base + "erai");
632 putIfMissing(namedArgs, "fut3s", base + "erà");
633 putIfMissing(namedArgs, "fut1p", base + "eremo");
634 putIfMissing(namedArgs, "fut2p", base + "erete");
635 putIfMissing(namedArgs, "fut3p", base + "eranno");
// Conditional.
637 putIfMissing(namedArgs, "cond1s", base + "erei");
638 putIfMissing(namedArgs, "cond2s", base + "eresti");
639 putIfMissing(namedArgs, "cond3s", base + "erebbe");
640 putIfMissing(namedArgs, "cond1p", base + "eremmo");
641 putIfMissing(namedArgs, "cond2p", base + "ereste");
642 putIfMissing(namedArgs, "cond3p", base + "erebbero");
643 // Subjunctive / congiuntivo
644 putIfMissing(namedArgs, "sub123s", base + "i");
645 putIfMissing(namedArgs, "sub1p", base + "iamo");
646 putIfMissing(namedArgs, "sub2p", base + "iate");
647 putIfMissing(namedArgs, "sub3p", base + "ino");
648 // Imperfect subjunctive
649 putIfMissing(namedArgs, "impsub12s", base + "assi");
650 putIfMissing(namedArgs, "impsub3s", base + "asse");
651 putIfMissing(namedArgs, "impsub1p", base + "assimo");
652 putIfMissing(namedArgs, "impsub2p", base + "aste");
653 putIfMissing(namedArgs, "impsub3p", base + "assero");
// Imperative.
655 putIfMissing(namedArgs, "imp2s", base + "a");
656 putIfMissing(namedArgs, "imp3s", base + "i");
657 putIfMissing(namedArgs, "imp1p", base + "iamo");
658 putIfMissing(namedArgs, "imp2p", base + "ate");
659 putIfMissing(namedArgs, "imp3p", base + "ino");
662 itConj(args, namedArgs);
// Placeholder for the generic Italian conjugation-template handler;
// itConjAre funnels into this once regular forms are filled in.
666 private void itConj(List<String> args, Map<String, String> namedArgs) {
667 // TODO Auto-generated method stub
// Stores value under key only when the map has no entry or an empty string
// there — template-supplied irregular forms always win over generated ones.
672 private static void putIfMissing(final Map<String, String> namedArgs, final String key,
673 final String value) {
674 final String oldValue = namedArgs.get(key);
675 if (oldValue == null || oldValue.length() == 0) {
676 namedArgs.put(key, value);
// Like putIfMissing, but an existing value of "''" (empty wiki italics) is
// rewritten to the empty string instead of being preserved.
// NOTE(review): as shown, a null oldValue falls through to
// oldValue.equals("''") and would NPE — an 'else' or 'return' between
// original lines 686 and 688 is presumably elided from this excerpt; confirm
// against the full source before changing.
680 // TODO: check how ='' and =| are manifested....
681 // TODO: get this right in -are
682 private static void putOrNullify(final Map<String, String> namedArgs, final String key,
683 final String value) {
684 final String oldValue = namedArgs.get(key);
685 if (oldValue == null/* || oldValue.length() == 0*/) {
686 namedArgs.put(key, value);
688 if (oldValue.equals("''")) {
689 namedArgs.put(key, "");
// Collapses every run of whitespace (including newlines) to a single space
// and strips leading/trailing whitespace.  Pattern is compiled once and
// shared, as regex best practice recommends.
694 static final Pattern whitespace = Pattern.compile("\\s+");
695 static String trim(final String s) {
696 return whitespace.matcher(s).replaceAll(" ").trim();