import java.io.RandomAccessFile;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.Collections;
import java.util.EnumMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.regex.Pattern;
import com.hughes.android.dictionary.DictionaryInfo;
import com.hughes.android.dictionary.DictionaryInfo.IndexInfo;
+import com.hughes.android.dictionary.engine.RowBase.RowKey;
import com.hughes.util.CachingList;
+import com.hughes.util.TransformingList;
import com.hughes.util.raf.RAFList;
import com.hughes.util.raf.RAFSerializable;
import com.hughes.util.raf.RAFSerializer;
+import com.hughes.util.raf.SerializableSerializer;
import com.hughes.util.raf.UniformRAFList;
import com.ibm.icu.text.Collator;
import com.ibm.icu.text.Transliterator;
// persisted
public final List<IndexEntry> sortedIndexEntries;
+
+ // persisted.
+ public final Set<String> stoplist;
// One big list!
// Various sub-types.
// --------------------------------------------------------------------------
/**
 * Builds a brand-new, empty, in-memory index (used at dictionary-build time;
 * the RAF constructor is used at load time).
 *
 * @param dict            the owning dictionary.
 * @param shortName       short display name of this index.
 * @param longName        long display name of this index.
 * @param sortLanguage    language whose collator orders the sorted entries.
 * @param normalizerRules ICU transliterator rules used to normalize tokens.
 * @param swapPairEntries whether pair entries are matched swapped.
 * @param stoplist        tokens ignored when choosing a search anchor (persisted).
 */
public Index(final Dictionary dict, final String shortName, final String longName, final Language sortLanguage, final String normalizerRules, final boolean swapPairEntries, final Set<String> stoplist) {
    this.dict = dict;
    this.shortName = shortName;
    this.longName = longName;
    // BUG FIX(review): the parameter was accepted but never stored, yet
    // getSortComparator() reads sortLanguage — confirm no duplicate assignment
    // exists in context elided from this view.
    this.sortLanguage = sortLanguage;
    this.normalizerRules = normalizerRules;
    this.swapPairEntries = swapPairEntries;
    this.stoplist = stoplist;
    sortedIndexEntries = new ArrayList<IndexEntry>();
    rows = new ArrayList<RowBase>();
    // Deferred: created lazily by normalizer() because construction is slow.
    normalizer = null;
}
+ /**
+ * Deferred initialization because it can be slow.
+ */
public synchronized Transliterator normalizer() {
if (normalizer == null) {
normalizer = Transliterator.createFromRules("", normalizerRules, Transliterator.FORWARD);
return normalizer;
}
+ /**
+ * Note that using this comparator probably involves doing too many text normalizations.
+ */
+ public NormalizeComparator getSortComparator() {
+ return new NormalizeComparator(normalizer(), sortLanguage.getCollator());
+ }
+
public Index(final Dictionary dict, final RandomAccessFile raf) throws IOException {
this.dict = dict;
shortName = raf.readUTF();
mainTokenCount = raf.readInt();
}
sortedIndexEntries = CachingList.create(RAFList.create(raf, IndexEntry.SERIALIZER, raf.getFilePointer()), CACHE_SIZE);
+ if (dict.dictFileVersion >= 4) {
+ stoplist = new SerializableSerializer<Set<String>>().read(raf);
+ } else {
+ stoplist = Collections.emptySet();
+ }
rows = CachingList.create(UniformRAFList.create(raf, new RowBase.Serializer(this), raf.getFilePointer()), CACHE_SIZE);
}
raf.writeInt(mainTokenCount);
}
RAFList.write(raf, sortedIndexEntries, IndexEntry.SERIALIZER);
+ new SerializableSerializer<Set<String>>().write(raf, stoplist);
UniformRAFList.write(raf, (Collection<RowBase>) rows, new RowBase.Serializer(this), 5);
}
public final String token;
private final String normalizedToken;
public final int startRow;
- public final int numRows;
+ public final int numRows; // doesn't count the token row!
static final RAFSerializer<IndexEntry> SERIALIZER = new RAFSerializer<IndexEntry> () {
}
}
+ static final TransformingList.Transformer<IndexEntry, String> INDEX_ENTRY_TO_TOKEN = new TransformingList.Transformer<IndexEntry, String>() {
+ @Override
+ public String transform(IndexEntry t1) {
+ return t1.token;
+ }
+ };
+
+ public IndexEntry findExact(final String exactToken) {
+ final int result = Collections.binarySearch(TransformingList.create(sortedIndexEntries, INDEX_ENTRY_TO_TOKEN), exactToken, getSortComparator());
+ if (result >= 0) {
+ return sortedIndexEntries.get(result);
+ }
+ return null;
+ }
+
public IndexEntry findInsertionPoint(String token, final AtomicBoolean interrupted) {
final int index = findInsertionPointIndex(token, interrupted);
return index != -1 ? sortedIndexEntries.get(index) : null;
return new DictionaryInfo.IndexInfo(shortName, sortedIndexEntries.size(), mainTokenCount);
}
- final List<RowBase> multiWordSearch(final List<String> searchTokens, final AtomicBoolean interrupted) {
+ private static final int MAX_SEARCH_ROWS = 1000;
+
+ private final Map<String,Integer> prefixToNumRows = new LinkedHashMap<String, Integer>();
+ private synchronized final int getUpperBoundOnRowsStartingWith(final String normalizedPrefix, final int maxRows, final AtomicBoolean interrupted) {
+ final Integer numRows = prefixToNumRows.get(normalizedPrefix);
+ if (numRows != null) {
+ return numRows;
+ }
+ final int insertionPointIndex = findInsertionPointIndex(normalizedPrefix, interrupted);
+
+ int rowCount = 0;
+ for (int index = insertionPointIndex; index < sortedIndexEntries.size(); ++index) {
+ if (interrupted.get()) { return -1; }
+ final IndexEntry indexEntry = sortedIndexEntries.get(index);
+ if (!indexEntry.normalizedToken.startsWith(normalizedPrefix)) {
+ break;
+ }
+ rowCount += indexEntry.numRows;
+ if (rowCount > maxRows) {
+ System.out.println("Giving up, too many words with prefix: " + normalizedPrefix);
+ break;
+ }
+ }
+ prefixToNumRows.put(normalizedPrefix, numRows);
+ return rowCount;
+ }
+
+
+ public final List<RowBase> multiWordSearch(final List<String> searchTokens, final AtomicBoolean interrupted) {
+ final long startMills = System.currentTimeMillis();
final List<RowBase> result = new ArrayList<RowBase>();
-
- // Heuristic: use the longest searchToken as the base.
- String searchToken = null;
+
+ final Set<String> normalizedNonStoplist = new LinkedHashSet<String>();
+
+ String bestPrefix = null;
+ int leastRows = Integer.MAX_VALUE;
+ final StringBuilder regex = new StringBuilder();
for (int i = 0; i < searchTokens.size(); ++i) {
if (interrupted.get()) { return null; }
+ final String searchToken = searchTokens.get(i);
final String normalized = normalizeToken(searchTokens.get(i));
// Normalize them all.
searchTokens.set(i, normalized);
- if (searchToken == null || normalized.length() > searchToken.length()) {
- searchToken = normalized;
+
+ if (!stoplist.contains(searchToken)) {
+ if (normalizedNonStoplist.add(normalized)) {
+ final int numRows = getUpperBoundOnRowsStartingWith(normalized, MAX_SEARCH_ROWS, interrupted);
+ if (numRows != -1 && numRows < leastRows) {
+ if (numRows == 0) {
+ // We really are done here.
+ return Collections.emptyList();
+ }
+ leastRows = numRows;
+ bestPrefix = normalized;
+ }
+ }
+ }
+
+ if (regex.length() > 0) {
+ regex.append("[\\s]*");
}
+ regex.append(Pattern.quote(normalized));
}
+ final Pattern pattern = Pattern.compile(regex.toString());
- final int insertionPointIndex = findInsertionPointIndex(searchToken, interrupted);
- if (insertionPointIndex == -1 || interrupted.get()) {
- return null;
+ if (bestPrefix == null) {
+ bestPrefix = searchTokens.get(0);
+ System.out.println("Everything was in the stoplist!");
}
-
- // The things that match.
- // TODO: use a key
- final Map<RowMatchType,Set<RowBase>> matches = new EnumMap<RowMatchType, Set<RowBase>>(RowMatchType.class);
+ System.out.println("Searching using prefix: " + bestPrefix + ", leastRows=" + leastRows + ", searchTokens=" + searchTokens);
+
+ // Place to store the things that match.
+ final Map<RowMatchType,List<RowBase>> matches = new EnumMap<RowMatchType, List<RowBase>>(RowMatchType.class);
for (final RowMatchType rowMatchType : RowMatchType.values()) {
- matches.put(rowMatchType, new LinkedHashSet<RowBase>());
+ if (rowMatchType != RowMatchType.NO_MATCH) {
+ matches.put(rowMatchType, new ArrayList<RowBase>());
+ }
}
- for (int index = insertionPointIndex; index < sortedIndexEntries.size(); ++index) {
- if (interrupted.get()) { return null; }
- final IndexEntry indexEntry = sortedIndexEntries.get(index);
- if (!indexEntry.normalizedToken.equals(searchToken)) {
- break;
- }
-
- for (int rowIndex = indexEntry.startRow; rowIndex < indexEntry.startRow + indexEntry.numRows; ++rowIndex) {
+ int matchCount = 0;
+ final Set<RowKey> cachedRowKeys = new HashSet<RowBase.RowKey>();
+
+// for (final String searchToken : searchTokens) {
+ final String searchToken = bestPrefix;
+
+ final int insertionPointIndex = findInsertionPointIndex(searchToken, interrupted);
+
+ for (int index = insertionPointIndex; index < sortedIndexEntries.size() && matchCount < MAX_SEARCH_ROWS; ++index) {
if (interrupted.get()) { return null; }
- final RowBase row = rows.get(rowIndex);
- final RowMatchType matchType = row.matches(searchTokens, normalizer, swapPairEntries);
- if (matchType != RowMatchType.NO_MATCH) {
- matches.get(matchType).add(row);
+ final IndexEntry indexEntry = sortedIndexEntries.get(index);
+ if (!indexEntry.normalizedToken.startsWith(searchToken)) {
+ break;
+ }
+
+// System.out.println("Searching indexEntry: " + indexEntry.token);
+
+ // Extra +1 to skip token row.
+ for (int rowIndex = indexEntry.startRow + 1; rowIndex < indexEntry.startRow + 1 + indexEntry.numRows && rowIndex < rows.size(); ++rowIndex) {
+ if (interrupted.get()) { return null; }
+ final RowBase row = rows.get(rowIndex);
+ final RowBase.RowKey rowKey = row.getRowKey();
+ if (cachedRowKeys.contains(rowKey)) {
+ continue;
+ }
+ cachedRowKeys.add(rowKey);
+ final RowMatchType matchType = row.matches(searchTokens, pattern, normalizer(), swapPairEntries);
+ if (matchType != RowMatchType.NO_MATCH) {
+ matches.get(matchType).add(row);
+ ++matchCount;
+ }
}
}
+// } // searchTokens
+
+ // Sort them into a reasonable order.
+ final RowBase.LengthComparator lengthComparator = new RowBase.LengthComparator(swapPairEntries);
+ for (final Collection<RowBase> rows : matches.values()) {
+ final List<RowBase> ordered = new ArrayList<RowBase>(rows);
+ Collections.sort(ordered, lengthComparator);
+ result.addAll(ordered);
}
- for (final Set<RowBase> rows : matches.values()) {
- result.addAll(rows);
- }
-
+ System.out.println("searchDuration: " + (System.currentTimeMillis() - startMills));
return result;
}
-
+
private String normalizeToken(final String searchToken) {
if (TransliteratorManager.init(null)) {
final Transliterator normalizer = normalizer();