@misc{ceb-kre-cho-lud-05-aa-expert,
  author       = {Ceberio, Martine and Kreinovich, Vladik and Chopra, Sanjeev and Lud{\"a}scher, Bertram},
  title        = {{Taylor}-Type Techniques for Handling Uncertainty in Expert Systems, with Potential Applications to Geoinformatics},
  howpublished = {Online document},
  year         = 2005,
  month        = apr,
  note         = {Date extracted from PDF metadata},
  comment      = {Uses AA to compute probabilities, so that $p \vee \neg p$ is $[1,1]$, not $[0,1]$.},
  abstract     = {Expert knowledge consists of statements $S_j$ (facts and rules). The expert's degree of confidence in each statement $S_j$ can be described as a (subjective) probability (some probabilities are known to be independent). Examples: if we are interested in oil, we should look at seismic data (confidence 90{\%}); a bank $A$ trusts a client $B$, so if we trust $A$, we should trust $B$ too (confidence 99{\%}). If a query $Q$ is deducible from the facts and rules, what is our confidence $p(Q)$ in $Q$? We can describe $Q$ as a propositional formula $F$ in terms of the $S_j$; computing $p(Q)$ exactly is NP-hard, so heuristics are needed. Traditionally, expert systems use a technique similar to straightforward interval computations: we parse $F$ and replace each computation step with the corresponding probability operation. Problem: at each step, we ignore the dependence between the intermediate results $F_j$; hence the resulting intervals are too wide. Example: the estimate for $P(A \vee \neg A)$ is not 1. Solution: similarly to affine arithmetic, besides $P(F_j)$, we also compute $P(F_j \wedge F_i)$ (or $P(F_{j_1} \wedge \dots \wedge F_{j_k})$), and at each step we use all combinations of $\ell$ such probabilities to get new estimates. Results: e.g., $P(A \vee \neg A)$ is estimated as 1.}
}
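A minimal worked sketch of the $A \vee \neg A$ example from the abstract (my own illustration, not taken from the paper; it assumes the naive step uses the standard Fr\'echet bounds for disjunction, since the paper's exact propagation rules are not reproduced here):
\[
\begin{aligned}
\text{naive, argument-wise: }\;
P(A \vee \neg A) &\in \bigl[\max\bigl(P(A),\,P(\neg A)\bigr),\ \min\bigl(1,\,P(A)+P(\neg A)\bigr)\bigr]
  = \bigl[\max(p,\,1-p),\ 1\bigr],\\[2pt]
\text{tracking the joint } P(A \wedge \neg A)=0:\;
P(A \vee \neg A) &= P(A)+P(\neg A)-P(A \wedge \neg A) = p+(1-p)-0 = 1.
\end{aligned}
\]
The first line ignores the dependence between $A$ and $\neg A$ and so only bounds the result; the second uses the extra joint probability, in the spirit of affine arithmetic, and recovers the exact value 1.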