001: /******************************************************************
002: * File: FBRuleInfGraph.java
003: * Created by: Dave Reynolds
004: * Created on: 28-May-2003
005: *
006: * (c) Copyright 2003, 2004, 2005, 2006, 2007, 2008 Hewlett-Packard Development Company, LP
007: * [See end of file]
008: * $Id: OrigFBRuleInfGraph.java,v 1.10 2008/01/02 12:09:44 andy_seaborne Exp $
*****************************************************************/

package com.hp.hpl.jena.reasoner.rulesys.impl.oldCode;
010:
011: import com.hp.hpl.jena.reasoner.rulesys.impl.*;
012: import com.hp.hpl.jena.reasoner.rulesys.*;
013: import com.hp.hpl.jena.reasoner.transitiveReasoner.*;
014: import com.hp.hpl.jena.reasoner.*;
015: import com.hp.hpl.jena.graph.*;
016:
017: import java.util.*;
018:
019: //import com.hp.hpl.jena.util.PrintUtil;
020: import com.hp.hpl.jena.util.OneToManyMap;
021: import com.hp.hpl.jena.util.iterator.*;
022: import com.hp.hpl.jena.vocabulary.RDFS;
023: import com.hp.hpl.jena.vocabulary.ReasonerVocabulary; //import com.hp.hpl.jena.util.PrintUtil;
024: //import com.hp.hpl.jena.vocabulary.RDF;
025:
026: import org.apache.commons.logging.Log;
027: import org.apache.commons.logging.LogFactory;
028:
029: /**
030: * An inference graph that uses a mixture of forward and backward
031: * chaining rules. The forward rules can create direct deductions from
032: * the source data and schema and can also create backward rules. A
033: * query is answered by consulting the union of the raw data, the forward
034: * derived results and any relevant backward rules (whose answers are tabled
035: * for future reference).
036: *
037: * @author <a href="mailto:der@hplb.hpl.hp.com">Dave Reynolds</a>
038: * @version $Revision: 1.10 $ on $Date: 2008/01/02 12:09:44 $
039: */
040: public class OrigFBRuleInfGraph extends BasicForwardRuleInfGraph
041: implements BackwardRuleInfGraphI {
042:
043: /** Single context for the reasoner, used when passing information to builtins */
044: protected BBRuleContext context;
045:
046: /** A finder that searches across the data, schema, axioms and forward deductions*/
047: protected Finder dataFind;
048:
049: /** The core backward rule engine which includes all the memoized results */
050: protected BRuleEngine bEngine;
051:
052: /** The original rule set as supplied */
053: protected List rawRules;
054:
055: /** The rule list after possible extension by preprocessing hooks */
056: protected List rules;
057:
058: /** Static switch from Basic to RETE implementation of the forward component */
059: public static boolean useRETE = true;
060:
061: /** Flag, if true then subClass and subProperty lattices will be optimized using TGCs */
062: protected boolean useTGCCaching = false;
063:
064: /** Optional precomputed cache of the subClass/subproperty lattices */
065: protected TransitiveEngine transitiveEngine;
066:
067: /** Optional list of preprocessing hooks to be run in sequence during preparation time */
068: protected List preprocessorHooks;
069:
070: /** Cache of temporary property values inferred through getTemp calls */
071: protected TempNodeCache tempNodecache;
072:
073: static Log logger = LogFactory.getLog(FBRuleInfGraph.class);
074:
075: // =======================================================================
076: // Constructors
077:
/**
 * Construct an inference graph over an optional schema, with the rule set
 * to be bound in later.
 * @param reasoner the reasoner which created this inf graph instance
 * @param schema the (optional) schema graph to be included
 */
public OrigFBRuleInfGraph(Reasoner reasoner, Graph schema) {
    super(reasoner, schema);
    // The backward engine and temp-node cache are created eagerly even
    // though no rules have been supplied yet.
    bEngine = new BRuleEngine(this);
    tempNodecache = new TempNodeCache(this);
}
088:
/**
 * Construct an inference graph from a rule set and an optional schema.
 * @param reasoner the reasoner which created this inf graph instance
 * @param rules the rules to process
 * @param schema the (optional) schema graph to be included
 */
public OrigFBRuleInfGraph(Reasoner reasoner, List rules, Graph schema) {
    super(reasoner, rules, schema);
    // Keep the caller-supplied list so prepare() can restore it after hooks run.
    this.rawRules = rules;
    bEngine = new BRuleEngine(this);
    tempNodecache = new TempNodeCache(this);
}
102:
/**
 * Construct an inference graph from a rule set, an optional schema and
 * an initial data graph.
 * @param reasoner the reasoner which created this inf graph instance
 * @param rules the rules to process
 * @param schema the (optional) schema graph to be included
 * @param data the data graph to be processed
 */
public OrigFBRuleInfGraph(Reasoner reasoner, List rules, Graph schema, Graph data) {
    super(reasoner, rules, schema, data);
    // Keep the caller-supplied list so prepare() can restore it after hooks run.
    this.rawRules = rules;
    bEngine = new BRuleEngine(this);
    tempNodecache = new TempNodeCache(this);
}
117:
118: /**
119: * Instantiate the forward rule engine to use.
120: * Subclasses can override this to switch to, say, a RETE imlementation.
121: * @param rules the rule set or null if there are not rules bound in yet.
122: */
123: protected void instantiateRuleEngine(List rules) {
124: if (rules != null) {
125: if (useRETE) {
126: engine = new RETEEngine(this , rules);
127: } else {
128: engine = new FRuleEngine(this , rules);
129: }
130: } else {
131: if (useRETE) {
132: engine = new RETEEngine(this );
133: } else {
134: engine = new FRuleEngine(this );
135: }
136: }
137: }
138:
139: /**
140: * Instantiate the optional caches for the subclass/suproperty lattices.
141: * Unless this call is made the TGC caching will not be used.
142: */
143: public void setUseTGCCache() {
144: useTGCCaching = true;
145: if (schemaGraph != null) {
146: transitiveEngine = new TransitiveEngine(
147: ((OrigFBRuleInfGraph) schemaGraph).transitiveEngine);
148: } else {
149: transitiveEngine = new TransitiveEngine(
150: new TransitiveGraphCache(
151: ReasonerVocabulary.directSubClassOf
152: .asNode(), RDFS.subClassOf.asNode()),
153: new TransitiveGraphCache(
154: ReasonerVocabulary.directSubPropertyOf
155: .asNode(), RDFS.subPropertyOf
156: .asNode()));
157: }
158: }
159:
160: // =======================================================================
161: // Interface between infGraph and the goal processing machinery
162:
163: /**
164: * Search the combination of data and deductions graphs for the given triple pattern.
165: * This may different from the normal find operation in the base of hybrid reasoners
166: * where we are side-stepping the backward deduction step.
167: */
168: public ExtendedIterator findDataMatches(Node subject,
169: Node predicate, Node object) {
170: return dataFind.find(new TriplePattern(subject, predicate,
171: object));
172: }
173:
174: /**
175: * Search the combination of data and deductions graphs for the given triple pattern.
176: * This may different from the normal find operation in the base of hybrid reasoners
177: * where we are side-stepping the backward deduction step.
178: */
179: public ExtendedIterator findDataMatches(TriplePattern pattern) {
180: return dataFind.find(pattern);
181: }
182:
183: /**
184: * Process a call to a builtin predicate
185: * @param clause the Functor representing the call
186: * @param env the BindingEnvironment for this call
187: * @param rule the rule which is invoking this call
188: * @return true if the predicate succeeds
189: */
190: public boolean processBuiltin(ClauseEntry clause, Rule rule,
191: BindingEnvironment env) {
192: if (clause instanceof Functor) {
193: context.setEnv(env);
194: context.setRule(rule);
195: return ((Functor) clause).evalAsBodyClause(context);
196: } else {
197: throw new ReasonerException("Illegal builtin predicate: "
198: + clause + " in rule " + rule);
199: }
200: }
201:
202: /**
203: * Adds a new Backward rule as a rusult of a forward rule process. Only some
204: * infgraphs support this.
205: */
206: public void addBRule(Rule brule) {
207: // logger.debug("Adding rule " + brule);
208: bEngine.addRule(brule);
209: bEngine.reset();
210: }
211:
212: /**
213: * Deletes a new Backward rule as a rules of a forward rule process. Only some
214: * infgraphs support this.
215: */
216: public void deleteBRule(Rule brule) {
217: // logger.debug("Deleting rule " + brule);
218: bEngine.deleteRule(brule);
219: bEngine.reset();
220: }
221:
222: /**
223: * Adds a set of new Backward rules
224: */
225: public void addBRules(List rules) {
226: for (Iterator i = rules.iterator(); i.hasNext();) {
227: Rule rule = (Rule) i.next();
228: // logger.debug("Adding rule " + rule);
229: bEngine.addRule(rule);
230: }
231: bEngine.reset();
232: }
233:
234: /**
235: * Return an ordered list of all registered backward rules. Includes those
236: * generated by forward productions.
237: */
238: public List getBRules() {
239: return bEngine.getAllRules();
240: }
241:
242: /**
243: * Return the originally supplied set of rules, may be a mix of forward
244: * and backward rules.
245: */
246: public List getRules() {
247: return rules;
248: }
249:
250: /**
251: * Return a compiled representation of all the registered
252: * forward rules.
253: */
254: private Object getForwardRuleStore() {
255: return engine.getRuleStore();
256: }
257:
258: /**
259: * Add a new deduction to the deductions graph.
260: */
261: public void addDeduction(Triple t) {
262: getDeductionsGraph().add(t);
263: if (useTGCCaching) {
264: transitiveEngine.add(t);
265: }
266: }
267:
268: /**
269: * Retrieve or create a bNode representing an inferred property value.
270: * @param instance the base instance node to which the property applies
271: * @param prop the property node whose value is being inferred
272: * @param pclass the (optional, can be null) class for the inferred value.
273: * @return the bNode representing the property value
274: */
275: public Node getTemp(Node instance, Node prop, Node pclass) {
276: return tempNodecache.getTemp(instance, prop, pclass);
277: }
278:
279: // =======================================================================
280: // Core inf graph methods
281:
282: /**
283: * Add a new rule to the rule set. This should only be used by implementations
284: * of RuleProprocessHook (which are called during rule system preparation phase).
285: * If called at other times the rule won't be correctly transferred into the
286: * underlying engines.
287: */
288: public void addRuleDuringPrepare(Rule rule) {
289: if (rules == rawRules) {
290: // Ensure the original is preserved in case we need to do a restart
291: if (rawRules instanceof ArrayList) {
292: rules = (ArrayList) ((ArrayList) rawRules).clone();
293: } else {
294: rules = new ArrayList(rawRules);
295: }
296: // Rebuild the forward engine to use the cloned rules
297: instantiateRuleEngine(rules);
298: }
299: rules.add(rule);
300: }
301:
302: /**
303: * Add a new preprocessing hook defining an operation that
304: * should be run when the preparation phase is underway.
305: */
306: public void addPreprocessingHook(RulePreprocessHook hook) {
307: if (preprocessorHooks == null) {
308: preprocessorHooks = new ArrayList();
309: }
310: preprocessorHooks.add(hook);
311: }
312:
/**
 * Perform any initial processing and caching. This call is optional. Most
 * engines either have negligible set up work or will perform an implicit
 * "prepare" if necessary. The call is provided for those occasions where
 * substantial preparation work is possible (e.g. running a forward chaining
 * rule system) and where an application might wish greater control over when
 * this preparation is done.
 *
 * Sequence: restore raw rules, rebuild the deductions graph and dataFind
 * cascade, optionally (re)populate the TGC caches, run preprocessing hooks
 * (obsoleted here - see below), preload schema deductions, then initialize
 * the forward engine and builtin context.
 */
public void prepare() {
    if (!isPrepared) {
        // Set the flag first; the engine init below may itself query this graph.
        isPrepared = true;

        // Restore the original pre-hookProcess rules
        rules = rawRules;

        // Is there any data to bind in yet?
        Graph data = null;
        if (fdata != null)
            data = fdata.getGraph();

        // Initialize the deductions graph and the cascaded finder over
        // deductions plus (if present) raw data.
        fdeductions = new FGraph(Factory.createGraphMem());
        dataFind = (data == null) ? fdeductions : FinderUtil
                .cascade(fdeductions, fdata);

        // Initialize the optional TGC caches
        if (useTGCCaching) {
            if (schemaGraph != null) {
                // Check if we can just reuse the copy of the raw schema caches
                if ((transitiveEngine.checkOccurance(
                        TransitiveReasoner.subPropertyOf, data)
                        || transitiveEngine
                                .checkOccurance(
                                        TransitiveReasoner.subClassOf,
                                        data)
                        || transitiveEngine.checkOccurance(
                                RDFS.domain.asNode(), data) || transitiveEngine
                        .checkOccurance(RDFS.range.asNode(), data))) {

                    // The data graph contains some ontology knowledge so split the caches
                    // now and rebuild them using merged data
                    transitiveEngine
                            .insert(
                                    ((OrigFBRuleInfGraph) schemaGraph).fdata,
                                    fdata);
                }
            } else {
                if (data != null) {
                    transitiveEngine.insert(null, fdata);
                }
            }
            // Insert any axiomatic statements (rules with empty bodies)
            // into the caches so they participate in transitive closure.
            for (Iterator i = rules.iterator(); i.hasNext();) {
                Rule r = (Rule) i.next();
                if (r.bodyLength() == 0) {
                    // An axiom
                    for (int j = 0; j < r.headLength(); j++) {
                        Object head = r.getHeadElement(j);
                        if (head instanceof TriplePattern) {
                            TriplePattern h = (TriplePattern) head;
                            transitiveEngine.add(h.asTriple());
                        }
                    }
                }
            }

            transitiveEngine.setCaching(true, true);
            // Extend the finder cascade with the subClass/subProperty caches.
            dataFind = FinderUtil.cascade(dataFind,
                    transitiveEngine.getSubClassCache(),
                    transitiveEngine.getSubPropertyCache());
        }

        // Call any optional preprocessing hook
        Finder dataSource = fdata;
        if (preprocessorHooks != null
                && preprocessorHooks.size() > 0) {
            Graph inserts = Factory.createGraphMem();
            for (Iterator i = preprocessorHooks.iterator(); i
                    .hasNext();) {
                RulePreprocessHook hook = (RulePreprocessHook) i
                        .next();
                // The signature is wrong to do this having moved this code into the attic.
                // If reinstating the old code uncomment the next line and remove the exception.
                // hook.run(this, dataFind, inserts);
                throw new ReasonerException(
                        "Internal error: attempted to invoke obsoleted reasoner with preprocessing hook");
            }
            if (inserts.size() > 0) {
                FGraph finserts = new FGraph(inserts);
                dataSource = FinderUtil.cascade(fdata, finserts);
                dataFind = FinderUtil.cascade(dataFind, finserts);
            }
        }

        boolean rulesLoaded = false;
        if (schemaGraph != null) {
            // Expose the schema's raw data through the finder cascade and
            // try to reuse its precomputed deductions and rule stores.
            Graph rawPreload = ((InfGraph) schemaGraph)
                    .getRawGraph();
            if (rawPreload != null) {
                dataFind = FinderUtil.cascade(dataFind, new FGraph(
                        rawPreload));
            }
            rulesLoaded = preloadDeductions(schemaGraph);
        }
        if (rulesLoaded) {
            engine.fastInit(dataSource);
        } else {
            // No preload so do the rule separation
            addBRules(extractPureBackwardRules(rules));
            engine.init(true, dataSource);
        }
        // Prepare the context for builtins run in backwards engine
        context = new BBRuleContext(this );

    }
}
430:
431: /**
432: * Cause the inference graph to reconsult the underlying graph to take
433: * into account changes. Normally changes are made through the InfGraph's add and
434: * remove calls are will be handled appropriately. However, in some cases changes
435: * are made "behind the InfGraph's back" and this forces a full reconsult of
436: * the changed data.
437: */
438: public void rebind() {
439: if (bEngine != null)
440: bEngine.reset();
441: isPrepared = false;
442: }
443:
444: // Suppressed - not all engines do static compilation. Now done as part of preload phase.
445:
446: // /**
447: // * Create a compiled representation of a list of rules.
448: // * @param rules a list of Rule objects
449: // * @return a datastructure containing precompiled representations suitable
450: // * for initializing FBRuleInfGraphs
451: // */
452: // public static RuleStore compile(List rules) {
453: // Object fRules = FRuleEngine.compile(rules, true);
454: // List bRules = extractPureBackwardRules(rules);
455: // return new RuleStore(rules, fRules, bRules);
456: // }
457: //
458: // /**
459: // * Attach a compiled rule set to this inference graph.
460: // * @param rulestore a compiled set of rules.
461: // */
462: // public void setRuleStore(RuleStore ruleStore) {
463: // this.rules = ruleStore.rawRules;
464: // addBRules(ruleStore.bRules);
465: // engine.setRuleStore(ruleStore.fRuleStore);
466: // }
467:
468: /**
469: * Set the state of the trace flag. If set to true then rule firings
470: * are logged out to the Log at "INFO" level.
471: */
472: public void setTraceOn(boolean state) {
473: super .setTraceOn(state);
474: bEngine.setTraceOn(state);
475: }
476:
477: /**
478: * Set to true to enable derivation caching
479: */
480: public void setDerivationLogging(boolean recordDerivations) {
481: this .recordDerivations = recordDerivations;
482: engine.setDerivationLogging(recordDerivations);
483: bEngine.setDerivationLogging(recordDerivations);
484: if (recordDerivations) {
485: derivations = new OneToManyMap();
486: } else {
487: derivations = null;
488: }
489: }
490:
491: /**
492: * Return the number of rules fired since this rule engine instance
493: * was created and initialized
494: */
495: public long getNRulesFired() {
496: return engine.getNRulesFired() + bEngine.getNRulesFired();
497: }
498:
499: /**
500: * Extended find interface used in situations where the implementator
501: * may or may not be able to answer the complete query. It will
502: * attempt to answer the pattern but if its answers are not known
503: * to be complete then it will also pass the request on to the nested
504: * Finder to append more results.
505: * @param pattern a TriplePattern to be matched against the data
506: * @param continuation either a Finder or a normal Graph which
507: * will be asked for additional match results if the implementor
508: * may not have completely satisfied the query.
509: */
510: public ExtendedIterator findWithContinuation(TriplePattern pattern,
511: Finder continuation) {
512: checkOpen();
513: if (!isPrepared)
514: prepare();
515:
516: ExtendedIterator result = null;
517: if (continuation == null) {
518: result = UniqueExtendedIterator.create(new TopGoalIterator(
519: bEngine, pattern));
520: } else {
521: result = UniqueExtendedIterator.create(
522: new TopGoalIterator(bEngine, pattern)).andThen(
523: continuation.find(pattern));
524: }
525: return result.filterDrop(Functor.acceptFilter);
526: }
527:
528: /**
529: * Returns an iterator over Triples.
530: * This implementation assumes that the underlying findWithContinuation
531: * will have also consulted the raw data.
532: */
533: public ExtendedIterator graphBaseFind(Node subject, Node property,
534: Node object) {
535: return findWithContinuation(new TriplePattern(subject,
536: property, object), null);
537: }
538:
539: /**
540: * Basic pattern lookup interface.
541: * This implementation assumes that the underlying findWithContinuation
542: * will have also consulted the raw data.
543: * @param pattern a TriplePattern to be matched against the data
544: * @return a ExtendedIterator over all Triples in the data set
545: * that match the pattern
546: */
547: public ExtendedIterator find(TriplePattern pattern) {
548: return findWithContinuation(pattern, null);
549: }
550:
551: /**
552: * Flush out all cached results. Future queries have to start from scratch.
553: */
554: public void reset() {
555: bEngine.reset();
556: isPrepared = false;
557: }
558:
/**
 * Add one triple to the data graph, run any rules triggered by
 * the new data item, recursively adding any generated triples.
 *
 * Order matters here: the raw data is updated first, then the TGC cache
 * (which may invalidate the prepared state), and only then is the forward
 * engine told - and only if the graph is still prepared. The backward
 * engine's tabled results are always discarded.
 */
public synchronized void performAdd(Triple t) {
    fdata.getGraph().add(t);
    if (useTGCCaching) {
        // add() returning true signals the caches need a rebuild.
        if (transitiveEngine.add(t))
            isPrepared = false;
    }
    if (isPrepared) {
        engine.add(t);
    }
    // Tabled backward results may now be stale.
    bEngine.reset();
}
574:
/**
 * Removes the triple t (if possible) from the set belonging to this graph.
 *
 * Mirrors performAdd: raw data first, then the TGC cache (a structural
 * change there drops all backward rules and forces a re-prepare), then the
 * deductions graph and forward engine if still prepared. The backward
 * engine's tabled results are always discarded.
 */
public void performDelete(Triple t) {
    fdata.getGraph().delete(t);
    if (useTGCCaching) {
        if (transitiveEngine.delete(t)) {
            if (isPrepared) {
                // Generated backward rules may depend on the deleted lattice entry.
                bEngine.deleteAllRules();
            }
            isPrepared = false;
        }
    }
    if (isPrepared) {
        getDeductionsGraph().delete(t);
        engine.delete(t);
    }
    // Tabled backward results may now be stale.
    bEngine.reset();
}
594:
595: /**
596: * Return a new inference graph which is a clone of the current graph
597: * together with an additional set of data premises. Attempts to the replace
598: * the default brute force implementation by one that can reuse some of the
599: * existing deductions.
600: */
601: public InfGraph cloneWithPremises(Graph premises) {
602: prepare();
603: FBRuleInfGraph graph = new FBRuleInfGraph(getReasoner(),
604: rawRules, this );
605: if (useTGCCaching)
606: graph.setUseTGCCache();
607: graph.setDerivationLogging(recordDerivations);
608: graph.setTraceOn(traceOn);
609: // Implementation note: whilst current tests pass its not clear that
610: // the nested passing of FBRuleInfGraph's will correctly handle all
611: // cases of indirectly bound schema data. If we do uncover a problem here
612: // then either include the raw schema in a Union with the premises or
613: // revert of a more brute force version.
614: graph.rebind(premises);
615: return graph;
616: }
617:
618: // =======================================================================
619: // Helper methods
620:
621: /**
622: * Scan the initial rule set and pick out all the backward-only rules with non-null bodies,
623: * and transfer these rules to the backward engine.
624: */
625: private static List extractPureBackwardRules(List rules) {
626: List bRules = new ArrayList();
627: for (Iterator i = rules.iterator(); i.hasNext();) {
628: Rule r = (Rule) i.next();
629: if (r.isBackward() && r.bodyLength() > 0) {
630: bRules.add(r);
631: }
632: }
633: return bRules;
634: }
635:
636: /**
637: * Adds a set of precomputed triples to the deductions store. These do not, themselves,
638: * fire any rules but provide additional axioms that might enable future rule
639: * firing when real data is added. Used to implement bindSchema processing
640: * in the parent Reasoner.
641: * @return true if the preload was able to load rules as well
642: */
643: protected boolean preloadDeductions(Graph preloadIn) {
644: Graph d = fdeductions.getGraph();
645: OrigFBRuleInfGraph preload = (OrigFBRuleInfGraph) preloadIn;
646: // If the rule set is the same we can reuse those as well
647: if (preload.rules == rules) {
648: // Load raw deductions
649: for (Iterator i = preload.getDeductionsGraph().find(null,
650: null, null); i.hasNext();) {
651: d.add((Triple) i.next());
652: }
653: // Load backward rules
654: addBRules(preload.getBRules());
655: // Load forward rules
656: engine.setRuleStore(preload.getForwardRuleStore());
657: // Add access to raw data
658: return true;
659: } else {
660: return false;
661: }
662: }
663:
664: // /**
665: // * Temporary debuggin support. List the dataFind graph.
666: // */
667: // public void debugListDataFind() {
668: // logger.debug("DataFind contains (ty and sc only:");
669: // for (Iterator i = dataFind.findWithContinuation(new TriplePattern(null, RDF.type.asNode(), null),null); i.hasNext(); ) {
670: // logger.debug(" " + PrintUtil.print(i.next()));
671: // }
672: // for (Iterator i = dataFind.findWithContinuation(new TriplePattern(null, RDFS.subClassOf.asNode(), null),null); i.hasNext(); ) {
673: // logger.debug(" " + PrintUtil.print(i.next()));
674: // }
675: // }
676:
677: // =======================================================================
678: // Inner classes
679:
/**
 * Structure used to wrap up pre-processed/compiled rule sets.
 * A plain data holder: fields are read directly by name elsewhere
 * (see the commented-out compile/setRuleStore methods above).
 */
public static class RuleStore {

    /** The raw rules */
    protected List rawRules;

    /** The indexed store used by the forward chainer */
    protected Object fRuleStore;

    /** The separated backward rules */
    protected List bRules;

    /**
     * Constructor.
     * @param rawRules the original mixed rule list
     * @param fRuleStore the compiled forward rule store
     * @param bRules the separated backward-only rules
     */
    public RuleStore(List rawRules, Object fRuleStore, List bRules) {
        this .rawRules = rawRules;
        this .fRuleStore = fRuleStore;
        this .bRules = bRules;
    }

}
704: }
705:
706: /*
707: (c) Copyright 2003, 2004, 2005, 2006, 2007, 2008 Hewlett-Packard Development Company, LP
708: All rights reserved.
709:
710: Redistribution and use in source and binary forms, with or without
711: modification, are permitted provided that the following conditions
712: are met:
713:
714: 1. Redistributions of source code must retain the above copyright
715: notice, this list of conditions and the following disclaimer.
716:
717: 2. Redistributions in binary form must reproduce the above copyright
718: notice, this list of conditions and the following disclaimer in the
719: documentation and/or other materials provided with the distribution.
720:
721: 3. The name of the author may not be used to endorse or promote products
722: derived from this software without specific prior written permission.
723:
724: THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
725: IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
726: OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
727: IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
728: INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
729: NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
730: DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
731: THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
732: (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
733: THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
734: */
|