@article{bol-con-12-ab-scalpre-j,
  author   = {Boland, David and Constantinides, George A.},
  title    = {A Scalable Precision Analysis Framework},
  journal  = {IEEE Transactions on Multimedia},
  year     = 2013,
  month    = feb,
  volume   = {15},
  number   = {2},
  pages    = {242--256},
  doi      = {10.1109/TMM.2012.2231666},
  comment  = {Presents the authors' analytical method for bounding the range and relative error of floating-point computations, so that precision-customized hardware accelerators can be made error-tolerant by design.},
  abstract = {In embedded computing, typically some form of silicon area or power budget restricts the potential performance achievable. For algorithms with limited dynamic range, custom hardware accelerators manage to extract significant additional performance for such a budget via mapping operations in the algorithm to fixed-point. However, for complex applications requiring floating-point computation, the potential performance improvement over software is reduced. Nonetheless, custom hardware can still customize the precision of floating-point operators, unlike software which is restricted to IEEE standard single or double precision, to increase the overall performance at the cost of increasing the error observed in the final computational result. Unfortunately, because it is difficult to determine if this error increase is tolerable, this task is rarely performed. We present a new analytical technique to calculate bounds on the range or relative error of output variables, enabling custom hardware accelerators to be tolerant of floating-point errors by design. In contrast to existing tools that perform this task, our approach scales to larger examples and obtains tighter bounds, within a smaller execution time. Furthermore, it allows a user to trade the quality of bounds with execution time of the procedure, making it suitable for both small and large-scale algorithms.}
}