001: /*
002: * The contents of this file are subject to the
003: * Mozilla Public License Version 1.1 (the "License");
004: * you may not use this file except in compliance with the License.
005: * You may obtain a copy of the License at http://www.mozilla.org/MPL/
006: *
007: * Software distributed under the License is distributed on an "AS IS"
008: * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
009: * See the License for the specific language governing rights and
010: * limitations under the License.
011: *
012: * The Initial Developer of the Original Code is Simulacra Media Ltd.
013: * Portions created by Simulacra Media Ltd are Copyright (C) Simulacra Media Ltd, 2004.
014: *
015: * All Rights Reserved.
016: *
017: * Contributor(s):
018: */
019: package org.openharmonise.rm.search;
020:
021: import java.io.*;
022: import java.util.*;
023:
024: import org.apache.lucene.analysis.*;
025:
026: /**
027: * <p>Extension of Lucene <code>Analyzer</code> for use with Harmonise.
028: *
029: * <p>This is a simple extension of the Lucene <code>Analyzer</code> class to meet the
030: * requirements for more sophisticated text indexing, in particular for use with the Becta
031: * VocabManager application.</p>
032: *
033: * <p>It provides an implementation that uses Lucene's built in implementation of the Porter
034: * stemming algorithm based on lower case tokenization of the input text.</p>
035: *
 * <p><em>Note:</em> this algorithm is designed specifically for English language text. The stemmer
 * uses basic knowledge of English morphology. It is <em>not</em> suitable for other
 * human languages!</p>
039: *
040: * @author John King
041: * @version $Revision: 1.3 $
042: *
043: */
044: public class HarmoniseAnalyzer extends Analyzer {
045:
046: private Set stopWordsSet;
047:
048: public HarmoniseAnalyzer() {
049: stopWordsSet = StopFilter.makeStopSet(SMART_STOP_WORDS);
050: }
051:
052: /**
053: * @see org.apache.lucene.analysis.Analyzer#tokenStream(java.lang.String, java.io.Reader)
054: */
055: public TokenStream tokenStream(String fieldName, Reader reader) {
056: return new PorterStemFilter(new StopFilter(
057: new LowerCaseTokenizer(reader), stopWordsSet));
058: }
059:
060: /**
061: * Returns <code>true</code> if the specified <code>String</code>
062: * contains a stop word
063: *
064: * @param sArg a <code>String</code> to be inspected for stop words
065: * @return<code>true</code> if the specified <code>String</code>
066: * contains a stop word
067: */
068: public static boolean containsStopWord(String sArg) {
069: boolean bContainsStop = false;
070:
071: List list = Arrays.asList(SMART_STOP_WORDS);
072:
073: StringTokenizer tokenizer = new StringTokenizer(sArg);
074:
075: while (tokenizer.hasMoreTokens()) {
076: String sToken = tokenizer.nextToken();
077:
078: if (list.contains(sToken) == true) {
079: bContainsStop = true;
080: break;
081: }
082: }
083:
084: return bContainsStop;
085: }
086:
087: /**
088: * Returns the array of stop words used by this <code>Analyzer</code>
089: *
090: * @return the array of stop words used by this <code>Analyzer</code>
091: */
092: public static String[] getStopWords() {
093: return SMART_STOP_WORDS;
094: }
095:
096: /*
097: * The list of stop words below is taken from http://www.onjava.com/onjava/2003/01/15/examples/EnglishStopWords.txt
098: * as suggested by Otis Gospodnetic's article at http://www.onjava.com/pub/a/onjava/2003/01/15/lucene.html?page=1
099: *
100: * All licensed as per Apache License, and therefore ok for inclusion here.
101: */
102:
103: // Contributed to Lucene on 2002-09-21 by "John Caron" <caron@unidata.ucar.edu>
104: //
105: // See also:
106: // org.apache.lucene.analysis.StopFilter.makeStopTable()
107: // ftp://ftp.cs.cornell.edu/pub/smart/
108: private static final String SMART_STOP_WORDS[] = { "a", "able",
109: "about", "above", "according", "accordingly", "across",
110: "actually", "after", "afterwards", "again", "against",
111: "all", "allow", "allows", "almost", "alone", "along",
112: "already", "also", "although", "always", "am", "among",
113: "amongst", "an", "and", "another", "any", "anybody",
114: "anyhow", "anyone", "anything", "anyway", "anyways",
115: "anywhere", "apart", "appear", "appreciate", "appropriate",
116: "are", "around", "as", "aside", "ask", "asking",
117: "associated", "at", "available", "away", "awfully", "b",
118: "be", "became", "because", "become", "becomes", "becoming",
119: "been", "before", "beforehand", "behind", "being",
120: "believe", "below", "beside", "besides", "best", "better",
121: "between", "beyond", "both", "brief", "but", "by", "c",
122: "came", "can", "cannot", "cant", "cause", "causes",
123: "certain", "certainly", "changes", "clearly", "co", "com",
124: "come", "comes", "concerning", "consequently", "consider",
125: "considering", "contain", "containing", "contains",
126: "corresponding", "could", "course", "currently", "d",
127: "definitely", "described", "despite", "did", "different",
128: "do", "does", "doing", "done", "down", "downwards",
129: "during", "e", "each", "edu", "eg", "eight", "either",
130: "else", "elsewhere", "enough", "entirely", "especially",
131: "et", "etc", "even", "ever", "every", "everybody",
132: "everyone", "everything", "everywhere", "ex", "exactly",
133: "example", "except", "f", "far", "few", "fifth", "first",
134: "five", "followed", "following", "follows", "for",
135: "former", "formerly", "forth", "four", "from", "further",
136: "furthermore", "g", "get", "gets", "getting", "given",
137: "gives", "go", "goes", "going", "gone", "got", "gotten",
138: "greetings", "h", "had", "happens", "hardly", "has",
139: "have", "having", "he", "hello", "help", "hence", "her",
140: "here", "hereafter", "hereby", "herein", "hereupon",
141: "hers", "herself", "hi", "him", "himself", "his", "hither",
142: "hopefully", "how", "howbeit", "however", "i", "ie", "if",
143: "ignored", "immediate", "in", "inasmuch", "inc", "indeed",
144: "indicate", "indicated", "indicates", "inner", "insofar",
145: "instead", "into", "inward", "is", "it", "its", "itself",
146: "j", "just", "k", "keep", "keeps", "kept", "know", "knows",
147: "known", "l", "last", "lately", "later", "latter",
148: "latterly", "least", "less", "lest", "let", "like",
149: "liked", "likely", "little", "look", "looking", "looks",
150: "ltd", "m", "mainly", "many", "may", "maybe", "me", "mean",
151: "meanwhile", "merely", "might", "more", "moreover", "most",
152: "mostly", "much", "must", "my", "myself", "n", "name",
153: "namely", "nd", "near", "nearly", "necessary", "need",
154: "needs", "neither", "never", "nevertheless", "new", "next",
155: "nine", "no", "nobody", "non", "none", "noone", "nor",
156: "normally", "not", "nothing", "novel", "now", "nowhere",
157: "o", "obviously", "of", "off", "often", "oh", "ok", "okay",
158: "old", "on", "once", "one", "ones", "only", "onto", "or",
159: "other", "others", "otherwise", "ought", "our", "ours",
160: "ourselves", "out", "outside", "over", "overall", "own",
161: "p", "particular", "particularly", "per", "perhaps",
162: "placed", "please", "plus", "possible", "presumably",
163: "probably", "provides", "q", "que", "quite", "qv", "r",
164: "rather", "rd", "re", "really", "reasonably", "regarding",
165: "regardless", "regards", "relatively", "respectively",
166: "right", "s", "said", "same", "saw", "say", "saying",
167: "says", "second", "secondly", "see", "seeing", "seem",
168: "seemed", "seeming", "seems", "seen", "self", "selves",
169: "sensible", "sent", "serious", "seriously", "seven",
170: "several", "shall", "she", "should", "since", "six", "so",
171: "some", "somebody", "somehow", "someone", "something",
172: "sometime", "sometimes", "somewhat", "somewhere", "soon",
173: "sorry", "specified", "specify", "specifying", "still",
174: "sub", "such", "sup", "sure", "t", "take", "taken", "tell",
175: "tends", "th", "than", "thank", "thanks", "thanx", "that",
176: "thats", "the", "their", "theirs", "them", "themselves",
177: "then", "thence", "there", "thereafter", "thereby",
178: "therefore", "therein", "theres", "thereupon", "these",
179: "they", "think", "third", "this", "thorough", "thoroughly",
180: "those", "though", "three", "through", "throughout",
181: "thru", "thus", "to", "together", "too", "took", "toward",
182: "towards", "tried", "tries", "truly", "try", "trying",
183: "twice", "two", "u", "un", "under", "unfortunately",
184: "unless", "unlikely", "until", "unto", "up", "upon", "us",
185: "use", "used", "useful", "uses", "using", "usually",
186: "uucp", "v", "value", "various", "very", "via", "viz",
187: "vs", "w", "want", "wants", "was", "way", "we", "welcome",
188: "well", "went", "were", "what", "whatever", "when",
189: "whence", "whenever", "where", "whereafter", "whereas",
190: "whereby", "wherein", "whereupon", "wherever", "whether",
191: "which", "while", "whither", "who", "whoever", "whole",
192: "whom", "whose", "why", "will", "willing", "wish", "with",
193: "within", "without", "wonder", "would", "would", "x", "y",
194: "yes", "yet", "you", "your", "yours", "yourself",
195: "yourselves", "z", "zero" };
196:
197: }
|