@misc{din-egb-din-03-aa-neural,
  author = {Dinerstein, Jonathan and Egbert, Parris and Dinerstein, Nelson},
  title = {Fast and Accurate Interval Arithmetic through Neural Network Approximation},
  howpublished = {Online document},
  url = {https://www.researchgate.net/publication/265245016_Fast_and_Accurate_Interval_Arithmetic_through_Neural_Network_Approximation},
  year = 2003,
  month = mar,
  pages = {12},
  note = {Date extracted from PDF metadata.},
  comment = {Introduces ``neural interval arithmetic''. Apparently trains a neural network $N_1$ to compute bounds for a class of target functions. The input of $N_1$ is a vector $u$ of parameters that determines the function within the class. A second network $N_2$ is then trained to estimate the approximation error of $N_1$ given the vector $u$. Finally, $N_1$ and $N_2$ are analyzed with a global-maximum algorithm to obtain a reliable error bound. Claims the method is better than affine arithmetic (AA). However, in the examples the parameter vector $u$ is just a set of sample values of the function, and thousands of input-output pairs are needed to train the networks. Not convincing at all...},
  abstract = {Interval arithmetic has become a popular tool for computer graphics. It has been applied to collision detection, ray tracing, implicit curve/surface enumeration, etc. It is useful for general optimization problems such as robust root finding and global maximum/minimum finding. However, interval arithmetic and related techniques (e.g. affine arithmetic) suffer from two significant weaknesses. First, their evaluations are often very conservative, making the techniques useless in many practical situations. Second, they can be much slower than traditional arithmetic. In this paper we present a new approach to computing interval arithmetic: neural network approximation. This naturally provides more accuracy since each traditional interval operation can introduce errors that compound, but a neural emulation approach requires only one approximation. Further, greater overall speed can be achieved due to the higher accuracy.}
}