1 // Copyright 2011 Google Inc. All Rights Reserved.
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
7 // http://www.apache.org/licenses/LICENSE-2.0
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
15 package com.hughes.android.dictionary.engine;
19 import com.hughes.android.dictionary.engine.Index.IndexEntry;
20 import com.hughes.android.dictionary.parser.DictFileParser;
// Builder that accumulates per-token entry data and materializes it into an Index.
// NOTE(review): this dump has elided lines (the embedded original line numbers skip
// values), so several bodies below are missing statements/braces — do not assume
// a missing brace is a bug in the real file.
22 public class IndexBuilder {
24 final DictionaryBuilder dictionaryBuilder;
25 public final Index index;
// Tokens excluded from indexing unless the EntryTypeName overrides the stoplist
// (see addEntryWithTokens).
26 final Set<String> stoplist;
// Fast exact-string lookup for TokenData; kept in sync with tokenToData
// (see getOrCreateTokenData, which throws if the two maps disagree).
28 final Map<String, TokenData> fastTokenToData;
// Same TokenData keyed for sorted iteration, ordered by the index's sort
// comparator wrapped in FastNormalizeComparator.
29 final SortedMap<FastCompareString, TokenData> tokenToData;
// Creates the builder and its backing Index. The two token maps (sorted TreeMap
// and exact-match HashMap) start empty and must keep identical membership.
31 IndexBuilder(final DictionaryBuilder dictionaryBuilder, final String shortName, final String longName, final Language language, final String normalizerRules, final Set<String> stoplist, final boolean swapPairEntries) {
32 this.dictionaryBuilder = dictionaryBuilder;
33 index = new Index(dictionaryBuilder.dictionary, shortName, longName, language, normalizerRules, swapPairEntries, stoplist);
// Sorted by the index's comparator; FastCompareString/FastNormalizeComparator
// presumably cache normalized forms to speed comparisons — TODO confirm.
34 tokenToData = new TreeMap<>(new FastNormalizeComparator(index.getSortComparator()));
35 fastTokenToData = new HashMap<>();
36 this.stoplist = stoplist;
// NOTE(review): the constructor's closing brace is elided from this dump.
// NOTE(review): the enclosing method signature is elided from this dump
// (presumably a build()-style method). The code walks every TokenData in
// sorted order, emitting a TokenRow plus entry rows into index.rows and
// recording an IndexEntry per token.
40 final Set<IndexedEntry> tokenIndexedEntries = new HashSet<>();
41 final List<RowBase> rows = index.rows;
42 index.mainTokenCount = 0;
43 for (final TokenData tokenData : tokenToData.values()) {
// Re-used across tokens to dedupe entries within a single token.
44 tokenIndexedEntries.clear();
45 final int indexIndex = index.sortedIndexEntries.size();
46 final int startRow = rows.size();
48 TokenRow tokenRow = null;
// Tokens with HTML entries get their TokenRow up front; otherwise it is
// created lazily when the first valid entry is seen (below).
49 if (!tokenData.htmlEntries.isEmpty()) {
50 tokenRow = new TokenRow(indexIndex, rows.size(), index, tokenData.hasMainEntry);
54 // System.out.println("Added TokenRow: " + rows.get(rows.size() - 1));
56 int numRows = 0; // off by one--doesn't count the token row!
57 // System.out.println("TOKEN: " + tokenData.token);
58 for (final Map.Entry<EntryTypeName, List<IndexedEntry>> typeToIndexedEntries : tokenData.typeToEntries.entrySet()) {
59 for (final IndexedEntry indexedEntry : typeToIndexedEntries.getValue()) {
// Skip entries flagged invalid (the continue/brace is elided in this dump).
60 if (!indexedEntry.isValid) {
64 if (tokenRow == null) {
65 tokenRow = new TokenRow(indexIndex, rows.size(), index, tokenData.hasMainEntry);
// First use of this entry: assign it an index in the dictionary.
69 if (indexedEntry.entry.index() == -1) {
70 indexedEntry.entry.addToDictionary(dictionaryBuilder.dictionary);
71 assert indexedEntry.entry.index() >= 0;
// Emit one row per distinct entry per token, but not for entries already
// represented by this token's HTML entries.
73 if (tokenIndexedEntries.add(indexedEntry) && !tokenData.htmlEntries.contains(indexedEntry.entry)) {
74 rows.add(indexedEntry.entry.CreateRow(rows.size(), index));
75 ++indexedEntry.entry.entrySource.numEntries;
78 // System.out.print(" " + typeToEntry.getKey() + ": ");
79 // rows.get(rows.size() - 1).print(System.out);
80 // System.out.println();
85 if (tokenRow != null) {
86 if (tokenRow.hasMainEntry) {
87 index.mainTokenCount++;
// Record the sorted-index entry: token, its transliterated form, and the
// row span [startRow, startRow + numRows) it owns.
90 final Index.IndexEntry indexEntry = new Index.IndexEntry(index, tokenData.token, index
91 .normalizer().transliterate(tokenData.token), startRow, numRows);
92 indexEntry.htmlEntries.addAll(tokenData.htmlEntries);
93 index.sortedIndexEntries.add(indexEntry);
// Debug dump: the 50 tokens owning the most rows.
97 final List<IndexEntry> entriesSortedByNumRows = new ArrayList<>(index.sortedIndexEntries);
// NOTE(review): int subtraction as a comparator can overflow;
// Integer.compare(object2.numRows, object1.numRows) is the safe idiom —
// left as-is here because this span is elided in the dump.
98 entriesSortedByNumRows.sort((object1, object2) -> object2.numRows - object1.numRows);
99 System.out.println("Most common tokens:");
100 for (int i = 0; i < 50 && i < entriesSortedByNumRows.size(); ++i) {
101 System.out.println(" " + entriesSortedByNumRows.get(i));
// Per-token accumulator: entries grouped by EntryTypeName, plus any HTML entries.
105 public static class TokenData {
108 final Map<EntryTypeName, List<IndexedEntry>> typeToEntries = new EnumMap<>(EntryTypeName.class);
// Set true once any EntryTypeName with mainWord references this token
// (see getOrCreateEntries); drives index.mainTokenCount.
109 public boolean hasMainEntry = false;
111 public final List<HtmlEntry> htmlEntries = new ArrayList<>();
113 TokenData(final String token) {
// Tokens must arrive pre-trimmed and non-empty.
114 assert token.equals(token.trim());
115 assert token.length() > 0;
// NOTE(review): the token-field assignment and closing brace are elided
// from this dump.
// Returns the TokenData for token, creating it on first use and inserting it
// into BOTH maps so they stay in lock-step.
120 public TokenData getOrCreateTokenData(final String token) {
121 TokenData tokenData = fastTokenToData.get(token);
122 if (tokenData != null) return tokenData;
123 tokenData = new TokenData(token);
124 final FastCompareString c = new FastCompareString(token);
// If the TreeMap already held an equivalent key that the HashMap missed,
// its Comparator disagrees with String.equals — fail loudly.
125 if (tokenToData.put(c, tokenData) != null) {
126 // The parallel HashMap assumes that the TreeMap Comparator
127 // is compatible with the equals it uses to compare.
128 throw new RuntimeException("TokenData TreeMap and HashMap out of sync, Comparator may be broken?");
130 fastTokenToData.put(token, tokenData);
// NOTE(review): the final return of the new tokenData is elided from this dump.
// Returns the (lazily created) entry list for (token, entryTypeName), also
// marking the token as a main entry when the type is a main-word type.
134 private List<IndexedEntry> getOrCreateEntries(final String token, final EntryTypeName entryTypeName) {
135 final TokenData tokenData = getOrCreateTokenData(token);
136 List<IndexedEntry> entries = tokenData.typeToEntries.get(entryTypeName);
// Side effect happens even if the list already exists.
137 if (entryTypeName.mainWord) {
138 tokenData.hasMainEntry = true;
140 if (entries == null) {
141 entries = new ArrayList<>();
142 tokenData.typeToEntries.put(entryTypeName, entries);
// NOTE(review): the return of `entries` is elided from this dump.
// (computeIfAbsent would collapse the null-check, but the span is incomplete.)
// Registers indexedEntry under every token that is not stop-listed (or whose
// type overrides the stoplist).
147 public void addEntryWithTokens(final IndexedEntry indexedEntry, final Set<String> tokens,
148 final EntryTypeName entryTypeName) {
// NOTE(review): debug leftover — this println fires before the assert and
// should be removed (or turned into a real error message) in the full file.
149 if (indexedEntry == null) {
150 System.out.println("asdfasdf");
152 assert indexedEntry != null;
153 for (final String token : tokens) {
154 if (entryTypeName.overridesStopList || !stoplist.contains(token)) {
155 getOrCreateEntries(token, entryTypeName).add(indexedEntry);
// Tokenizes untokenizedString and registers the entry under each token.
// A single-token string is demoted to the type's singleWordInstance variant.
160 public void addEntryWithString(final IndexedEntry indexedEntry, final String untokenizedString,
161 final EntryTypeName entryTypeName) {
162 final Set<String> tokens = DictFileParser.tokenize(untokenizedString, DictFileParser.NON_CHAR);
163 addEntryWithTokens(indexedEntry, tokens, tokens.size() == 1 ? entryTypeName.singleWordInstance : entryTypeName);
// Like addEntryWithString, but never demotes a single-token string to the
// singleWordInstance variant of the type.
166 public void addEntryWithStringNoSingle(final IndexedEntry indexedEntry, final String untokenizedString,
167 final EntryTypeName entryTypeName) {
168 final Set<String> tokens = DictFileParser.tokenize(untokenizedString, DictFileParser.NON_CHAR);
// NOTE(review): closing braces past this point are elided from this dump.
169 addEntryWithTokens(indexedEntry, tokens, entryTypeName);