# Last edited on 2022-11-24 15:53:05 by stolfi
# Affine arithmetic refs from Google Scholar - lot 8

@inproceedings{fai-san-iva-gu-09-aa-hybrid,
  author = {Fainekos, Georgios and Sankaranarayanan, Sriram and Ivancic, Franjo and Gupta, Aarti},
  title = {Robustness of Model-Based Simulations},
  booktitle = {Proceedings of the 30th IEEE Real-Time Systems Symposium (RTSS)},
  location = {Washington, US},
  pages = {345-354},
  year = 2009,
  month = dec,
  doi = {10.1109/RTSS.2009.26},
  comment = {Developed their own Matlab affine arithmetic library \texttt{AALab}. Uses it and IA to analyze errors in Simulink models, and for reachability in hybrid systems.},
  abstract = {This paper proposes a framework for determining the correctness and robustness of simulations of hybrid systems. The focus is on simulations generated from model-based design environments and, in particular, Simulink. The correctness and robustness of the simulation is guaranteed against floating-point rounding errors and system modeling uncertainties. Toward that goal, self-validated arithmetics, such as interval and affine arithmetic, are employed for guaranteed simulation of discrete-time hybrid systems. In the case of continuous-time hybrid systems, self-validated arithmetics are utilized for over-approximations of reachability computations.}
}

@inproceedings{far-12-aa-abd-junk,
  author = {Farghaly, Ashraf Salem},
  title = {{ABD} 1: {Property}-based Verification},
  booktitle = {Proceedings of the 2012 Forum on Specification, Verification and Design Languages (FDL)},
  pages = {12-12},
  year = 2012,
  month = sep,
  url = {https://ieeexplore.ieee.org/abstract/document/6336976},
  note = {One-page session intro. No DOI.},
  comment = {One-page introduction to a session of the proceedings. No useful contents.},
  abstract = {The first paper is about the static analysis of constraints that define the environment of an IP, typically a communication protocol, and guarantee that the environment is not over-constrained. The second paper presents the use of affine arithmetic to model typical properties of mixed-signal devices and check the impact of parameter deviations on the system behavior. The third contribution discusses the use of verified CTL properties about a system's components to generate efficient component abstractions and verify a global property on their composition.}
}

@inproceedings{tit-fel-mos-mun-18-aa-precisa,
  author = {Titolo, Laura and Feli{\'u}, Marco A. and Moscato, Mariano and Mu{\~n}oz, C{\'e}sar A.},
  title = {An Abstract Interpretation Framework for the Round-Off Error Analysis of Floating-Point Programs},
  booktitle = {Proceedings of the 19th International Conference on Verification, Model Checking, and Abstract Interpretation (VMCAI)},
  location = {Los Angeles, US},
  series = {Lecture Notes in Computer Science},
  volume = {10747},
  pages = {516-537},
  year = 2018,
  month = jan,
  doi = {10.1007/978-3-319-73721-8_24},
  comment = {Computes a guaranteed enclosure for the FP roundoff error in each execution path of the program. Also defines a widening (join?) operation to accommodate loops and recursion. Implemented as the \texttt{PRECiSA} prototype tool.},
  abstract = {This paper presents an abstract interpretation framework for the round-off error analysis of floating-point programs. This framework defines a parametric abstract analysis that computes, for each combination of ideal and floating-point execution path of the program, a sound over-approximation of the accumulated floating-point round-off error that may occur.
In addition, a Boolean expression that characterizes the input values leading to the computed error approximation is also computed. An abstraction on the control flow of the program is proposed to mitigate the explosion of the number of elements generated by the analysis. Additionally, a widening operator is defined to ensure the convergence of recursive functions and loops. An instantiation of this framework is implemented in the prototype tool PRECiSA that generates formal proof certificates stating the correctness of the computed round-off errors.}
}

@book{fem-pet-spa-vit-17-aa-solar,
  author = {Femia, Nicola and Petrone, Giovanni and Spagnuolo, Giovanni and Vitelli, Massimo},
  title = {Power Electronics and Control Techniques for Maximum Energy Harvesting in Photovoltaic Systems},
  publisher = {CRC},
  isbn = {978-1-4665-0690-9},
  pages = {366},
  year = 2017,
  month = jul,
  comment = {Tools for designing and managing solar energy grids. Presumably covers AA.},
  abstract = {The book supplies an overview of recent improvements in connecting PV systems to the grid and highlights various solutions that can be used as a starting point for further research and development. It begins with a review of methods for modeling a PV array working in uniform and mismatched conditions. The book then discusses several ways to achieve the best maximum power point tracking (MPPT) performance. A chapter focuses on MPPT efficiency, examining the design of the parameters that affect algorithm performance. The authors also address the maximization of the energy harvested in mismatched conditions, in terms of both power architecture and control algorithms, and discuss the distributed MPPT approach. The final chapter details the design of DC/DC converters, which usually perform the MPPT function, with special emphasis on their energy efficiency.}
}

@phdthesis{fer-08-aa-rdoff-th,
  author = {Fern{\'a}ndez, Gabriel Caffarena},
  title = {Combined Word-Length Allocation and High-Level Synthesis of Digital Signal Processing Circuits},
  school = {Universidad Politécnica de Madrid},
  pages = {216},
  year = 2008,
  month = oct,
  doi = {10.20868/UPM.thesis.1822},
  note = {Advisors: Carreras Vaquer, Carlos and Nieto-Taladriz Garcia, Octavio},
  comment = {Has an overview of AA. Uses AA to estimate rounding errors in DSP with non-uniform word lengths. Worries about loops and conditionals?},
  abstract = {This work is focused on the synthesis of Digital Signal Processing (DSP) circuits using specific hardware architectures. Due to its complexity, the design process has been subdivided into separate tasks, thus hindering the global optimization of the resulting systems. The author proposes the study of the combination of two major design tasks, Word-Length Allocation (WLA) and High-Level Synthesis (HLS), aiming at the optimization of DSP implementations using modern Field Programmable Gate Array devices (FPGAs). A multiple word-length approach (MWL) is adopted since it leads to highly optimized implementations. MWL implies the customization of the word-lengths of the signals of an algorithm. This complicates the design, since the number of possible assignations between algorithm operations and hardware resources becomes very high. Moreover, this work also considers the use of heterogeneous FPGAs where there are several types of resources: configurable logic-based blocks (LUT-based) and specialized embedded resources.
All these issues are addressed in this work and several automatic design techniques are proposed. The contributions of the Thesis cover the fields of WLA, HLS using FPGAs, and the combined application of WLA and HLS for implementation in FPGAs. A thorough approach to HLS has been implemented, which considers a complete datapath composed of functional units (FUs), registers and multiplexers, as well as heterogeneous FPGA resources (LUT-based and embedded resources). The approach makes use of a resource library that accounts for MWL effects within the set of resources, thus producing highly optimized architectures. This library includes both LUT-based and embedded FPGA resources, which further increases the power of the HLS task. Another important contribution is the introduction of resource usage metrics suitable for heterogeneous-architecture FPGAs. A novel quantization error estimation based on affine arithmetic (AA) is presented, as well as its practical application to the automatic WLA of LTI and non-linear differentiable DSP systems. The error estimation is based on performing a pre-processing of the algorithm, which produces an expression of the quantization error at the system output. Therefore, the error can be easily computed, leading to fast and accurate WLA optimizations. The analysis of the impact of different optimization techniques during WLA on HLS results is also presented. The variance in the obtained results corroborates the fact that it is worth using a single architecture model during WLA and HLS, and this is only possible by means of combining these tasks. The actual combination of WLA and HLS has first been performed by using a Mixed Integer Linear Programming (MILP) approach. The results prove the validity of the approach and also provide insights into the combination of the two tasks, which are used to generate heuristic synthesis algorithms. Finally, the global contribution of this thesis is an HLS heuristic algorithm able to perform the combined WLA and HLS of DSP systems for both homogeneous and heterogeneous FPGA architectures. Resource usage reductions of up to 20% are reported, which proves the importance of such a combined approach, providing electronic designers with a design framework that enables highly improved DSP custom hardware implementations.},
  url = {https://dialnet.unirioja.es/servlet/tesis?codigo=87121},
  quotes = {... A novel quantization error estimation based on affine arithmetic (AA) is presented, as well as its practical application to the automatic WLA of LTI and non-linear differentiable DSP ...}
}

@inproceedings{fer-fre-ert-16-aa-repsurf,
  author = {Fernandes, Oliver and Frey, Steffen and Ertl, Thomas},
  title = {Interpolation-Based Extraction of Representative Isosurfaces},
  booktitle = {Proceedings of the 12th International Symposium on Advances in Visual Computing (ISVC), Part I},
  location = {Las Vegas, US},
  series = {Lecture Notes in Computer Science},
  publisher = {Springer},
  volume = {10072},
  pages = {403-413},
  year = 2016,
  month = dec,
  doi = {10.1007/978-3-319-50835-1_37},
  comment = {Problem is selecting ``representative'' isosurfaces in a tomogram. Mentions AA en passant but does not use it. May be an interesting problem though. Relevant to Ana Paula Malheiros's thesis?},
  abstract = {We propose a novel technique for the automatic, similarity-based selection of representative surfaces. While our technique can be applied to any set of manifolds, we particularly focus on isosurfaces from volume data.
We select representatives from sets of surfaces stemming from varying isovalues or time-dependent data. For selection, our approach interpolates between surfaces using a minimum cost flow solver, and determines whether the interpolate adequately represents the actual surface in-between. For this, we employ the Hausdorff distance as an intuitive measure of the similarity of two components. In contrast to popular contour tree-based approaches, which are limited to changes in topology, our approach also accounts for geometric deviations. For interactive visualization, we employ a combination of surface renderings and a graph view that depicts the selected surfaces and their relation. We finally demonstrate the applicability and utility of our approach by means of several data sets from different areas.}
}

@incollection{fer-gaz-22-aa-globopt,
  author = {Fern{\'a}ndez, Jos{\'e} and Gazdag-T{\'o}th, Bogl{\'a}rka},
  title = {Interval Tools in Branch-and-Bound Methods for Global Optimization},
  booktitle = {The Palgrave Handbook of Operations Research},
  publisher = {Palgrave Macmillan},
  isbn = {978-3-030-96934-9},
  pages = {237-267},
  year = 2022,
  month = jul,
  doi = {10.1007/978-3-030-96935-6_8},
  comment = {Survey paper. Discusses several works that used AA for global optimization.},
  abstract = {Interval analysis has been applied with success at solving continuous nonlinear programming problems. In this chapter we review the most relevant research on interval branch-and-bound methods in the period 2011-2021, in particular, bounding rules, discarding/filtering methods, rules for the selection of the next box to be processed, and subdivision strategies. No constraint programming techniques nor hybrid proposals are included in the review for the sake of brevity, although we do include a review of the available interval arithmetic libraries for different programming languages as well as software packages with implementations of interval branch-and-bound algorithms. Surprisingly, interval tools have seldom been applied to cope with mixed-integer nonlinear programming problems. This chapter also reviews the proposals in the literature to use interval methods in this type of problems, and suggests how some of the techniques proposed for continuous problems can be adapted for mixed-integer problems.}
}

@inproceedings{fic-08-intuit-junk,
  author = {Fichot, J.},
  title = {G{\"o}del, Constructivity, Impredicativity, and Feasibility},
  booktitle = {Proceedings of the Conference ``One Hundred Years of Intuitionism (1907-2007)''},
  location = {Cerisy, FR},
  series = {Publications of the Henri Poincaré Archives},
  pages = {198-213},
  year = 2008,
  month = jun,
  doi = {10.1007/978-3-7643-8653-5_13},
  comment = {Bogus Google Scholar hit -- their ``affine arithmetic'' is not our AA but some logic thing.},
  abstract = {This paper does not pretend to be an exhaustive survey of Gödel's interpretation of intuitionism. This long and rather complicated story has already been told and analyzed by others, for instance in (Kreisel 1987b) and (Tait 2006a, b). More modestly, our first aim here is to present a different appearance of a ghost during that story, the one of impredicativity, and to show that only one case is to be taken seriously from a strict anti-realist point of view: the impredicativity of the concept of natural number.
This leads to our second aim, which is to present some of the feasible versions of Gödel's Dialectica interpretation.}
}

@techreport{fig-95-aa-surfint-tr,
  author = {De Figueiredo, Luiz Henrique},
  title = {Surface Intersection using Affine Arithmetic},
  institution = {University of Waterloo},
  number = {CS-95-47},
  pages = {15},
  year = 1995,
  month = oct,
  comment = {See conference version [fig-96-aa-surfint].},
  abstract = {We describe a variant of a domain decomposition method proposed by Gleicher and Kass for intersecting and trimming parametric surfaces. Instead of using interval arithmetic to guide the decomposition, the variant described here uses affine arithmetic, a tool recently proposed for range analysis. Affine arithmetic is similar to standard interval arithmetic, but takes into account correlations between operands and sub-formulas, generally providing much tighter bounds for the computed quantities. As a consequence, the quadtree domain decompositions are much smaller and the intersection algorithm runs faster.}
}

@article{fig-gom-96-partic,
  author = {De Figueiredo, Luiz Henrique and Gomes, Jonas},
  title = {Sampling Implicit Objects with Physically-Based Particle Systems},
  journal = {Computers {\&} Graphics},
  volume = {20},
  number = {3},
  pages = {365-375},
  year = 1996,
  month = may,
  doi = {10.1016/0097-8493(96)00005-2},
  comment = {Mentions AA but does not use it. Seeds particles in space and lets them travel towards the surface.},
  abstract = {After reviewing three classical sampling methods for implicit objects, we describe a new sampling method that is not based on scanning the ambient space. In this method, samples are ``randomly'' generated using physically-based particle systems.}
}

@inproceedings{bec-zyu-mon-dar-myg-fox-18-aa-coqhol,
  author = {Becker, Heiko and Zyuzin, Nikita and Monat, Raphaël and Darulov{\'a}, Eva and Myreen, Magnus O. and Fox, Anthony},
  title = {A Verified Certificate Checker for Finite-Precision Error Bounds in {Coq} and {HOL4}},
  booktitle = {Proceedings of the 2018 Conference on Formal Methods in Computer Aided Design (FMCAD)},
  location = {Austin, US},
  pages = {1-10},
  year = 2018,
  month = nov,
  doi = {10.23919/FMCAD.2018.8603019},
  comment = {Presents a tool that automatically checks the correctness of declared finite-precision roundoff error bounds, for both \texttt{Coq} and \texttt{HOL4}. Uses AA and IA.},
  abstract = {Being able to soundly estimate roundoff errors of finite-precision computations is important for many applications in embedded systems and scientific computing. Due to the discrepancy between continuous reals and discrete finite-precision values, automated static analysis tools are highly valuable to estimate roundoff errors. The results, however, are only as correct as the implementations of the static analysis tools. This paper presents a formally verified and modular tool which fully automatically checks the correctness of finite-precision roundoff error bounds encoded in a certificate. We present implementations of certificate generation and checking for both Coq and HOL4 and evaluate it on a number of examples from the literature.
The experiments use both in-logic evaluation of Coq and HOL4, and execution of extracted code outside of the logics: we benchmark Coq-extracted unverified OCaml code and a CakeML-generated verified binary.}
}

@article{tam-bis-sin-21-aa-repair,
  author = {Tamta, Renu and Bisht, Soni and Singh, Suraj Bhan},
  title = {Reliability Evaluation of Repairable Parallel-Series Multi-State System Implementing Interval Valued Universal Generating Function},
  journal = {Journal of Reliability and Statistical Studies},
  publisher = {River},
  volume = {14},
  number = {1},
  pages = {81-120},
  year = 2021,
  month = apr,
  doi = {10.13052/jrss0974-8024.1415},
  url = {https://scholar.archive.org/work/nhcg3bg3abfbvagykztfs46j7q/access/wayback/https://www.journal.riverpublishers.com/index.php/JRSS/article/download/2483/1936},
  note = {Site of publisher River looks defunct.},
  comment = {Mentions AA en passant but does not use it.},
  abstract = {In this paper, we have studied a repairable parallel-series multi-state system. The proposed system consists of m components in series and n components in parallel, in which each component has three possible states. The interval universal generating function (IUGF) is presented, and the corresponding composition operators are defined. The reliability assessment of the considered system is done with the help of the IUGF approach. It is worth mentioning that IUGF got attention from various researchers due to its straightforwardness, low complexity, and universal applications. In the present model, probabilities of different components, reliability, sensitivity, and mean time to failure are evaluated with the help of the Markov process and the Laplace-Stieltjes transform method applying IUGF. A numerical example has also been taken to illustrate the proposed technique.}
}

@article{fuc-cao-tan-lon-18-aa-decomp,
  author = {Fu, C. M. and Cao, L. X. and Tang, J. C. and Long, X. Y.},
  title = {A Subinterval Decomposition Analysis Method for Uncertain Structures with Large Uncertainty Parameters},
  journal = {Computers {\&} Structures},
  volume = {197},
  pages = {58-69},
  year = 2018,
  month = feb,
  doi = {10.1016/j.compstruc.2017.12.001},
  comment = {Not clear whether it even mentions AA. Apparently some non-guaranteed heuristic combining IA and midpoint FP evaluation?},
  abstract = {A simple and efficient subinterval decomposition analysis method is proposed to evaluate the lower and upper bounds of structural responses with large uncertain parameters. The proposed method decomposes the original structural system with multi-dimensional interval parameters into multiple one-dimensional subsystems. Every subsystem has only one interval parameter and the other interval parameters are substituted by their midpoint values. By dividing the interval parameter of each subsystem into several subintervals with small uncertainty, the lower and upper bounds of the system are approximately calculated by only a few subinterval combinational analyses instead of all possible combinations of subintervals.
Finally, the accuracy and efficiency of the proposed method, compared with the first-order Taylor method, the Chebyshev interval method, and the traditional subinterval method, are verified by several numerical examples and applications.}
}

@article{fuc-liu-xia-19-aa-diffev,
  author = {Fu, Chunming and Liu, Yongxia and Xiao, Zhang},
  title = {Interval Differential Evolution with Dimension-Reduction Interval Analysis Method for Uncertain Optimization Problems},
  journal = {Applied Mathematical Modelling},
  volume = {69},
  pages = {441-452},
  year = 2019,
  month = may,
  doi = {10.1016/j.apm.2018.12.025},
  comment = {Unclear abstract. Maybe optimization of a problem defined by a differential equation? Mentions AA en passant (but does not use it) and also ``improved interval analysis by Extra Unitary Interval'' (IIA-EUI), ``parameterized interval analysis'' (PIA), and ``universal grey system theory''.},
  abstract = {A constrained nonlinear interval optimization method under the framework of the differential evolution algorithm is developed to solve uncertain structural optimization problems with interval uncertainties. The proposed method is a direct optimization method based on interval differential evolution and dimension-reduction interval analysis. The interval preferential rule based on the satisfaction value of the interval possibility degree model is used to realize the direct interval ranking of different design vectors. At each evolutionary generation, the outer differential evolution optimizer searches for the best solution within the design space. The dimension-reduction interval analysis is employed to calculate the intervals of objective and constraints for each design vector in the inner layer. This operation transforms the original nesting optimization problem into a single-loop one, which improves the computational efficiency of the proposed method. Finally, the effectiveness of the presented direct method is verified by two numerical examples and an engineering application.}
}

@inproceedings{fuh-gla-gri-hed-17-aa-anamix,
  author = {F{\"u}rtig, Andreas and Gl{\"a}ser, Georg and Grimm, Christoph and Hedrich, Lars and Heinen, Stefan and Lee, Hyun-Sek Lukas and Nitsche, Gregor and Olbrich, Markus and Radoji{\v{c}}ic, {\v{C}}arna and Speicher, Fabian},
  title = {Novel Metrics for Analog Mixed-Signal Coverage},
  booktitle = {Proceedings of the 20th IEEE International Symposium on Design and Diagnostics of Electronic Circuits {\&} Systems (DDECS)},
  pages = {97-102},
  year = 2017,
  month = apr,
  doi = {10.1109/DDECS.2017.7934589},
  comment = {Survey article for analysis of analog/mixed-signal circuits? Mentions AA briefly.},
  abstract = {In contrast to the digital world, no coverage definition exists in the Analog/Mixed-Signal (AMS) context. While digital coverage helps digital designers and verification engineers to evaluate their verification progress, analog designers do not have such metrics. This paper proposes a set of different analog coverage metrics, which improve the confidence in AMS circuit verification. We will demonstrate that no single overall coverage metric exists. However, as with digital coverage, the proposed analog coverage metrics could substantially help in rating the verification process.
Illustrated by a complex AMS circuit example, we will explore the limits of analog coverage methodologies as well as the benefits on different levels of abstraction, ranging from transistor level up to system level.}
}

@inproceedings{gaf-cla-con-06-aa-powbit,
  author = {Gaffar, Altaf Abdul and Clarke, Jonathan A. and Constantinides, George A.},
  title = {{PowerBit} - {Power}-Aware Arithmetic Bit-Width Optimization},
  booktitle = {Proceedings of the IEEE International Conference on Field Programmable Technology (FPT)},
  pages = {289-292},
  year = 2006,
  month = dec,
  doi = {10.1109/FPT.2006.270330},
  comment = {Describes the tool \texttt{PowerBit} to optimize bit-widths in DSP FPGAs. Uses AA for both precision analysis and range analysis.},
  abstract = {In this paper we present a novel method for reducing the dynamic power consumption in FPGA-based arithmetic circuits by optimizing the bit-widths of the signals inside the circuit. The proposed method is implemented in the tool PowerBit, which makes use of macro models parameterized by word-level signal statistics to estimate the circuit power consumption during the optimization process. The power models used take into account the generation and propagation of signal glitches through the circuit. The bit-width optimization uses a static analysis technique which is capable of providing guaranteed accuracy in the design outputs. We show that, for sample designs implemented on FPGAs, improvements of over 10% are possible for multiple bit-width allocated designs optimized for power, compared to designs allocated uniform bit-widths.}
}

@misc{gal-17-aa-bisect,
  author = {Galv{\'a}n, Manuel L{\'o}pez},
  title = {The Multivariate Bisection Algorithm},
  howpublished = {Online document at the arXiv repository.},
  number = {1702.05542 version 2},
  pages = {19},
  year = 2017,
  month = nov,
  doi = {10.48550/arXiv.1702.05542},
  comment = {Solves systems of non-linear equations using domain subdivision. Uses AA to evaluate the functions in a cell. Uses the Poincaré-Miranda theorem to prove the existence of a solution in a given cell, when the test succeeds (but it may not always succeed, even arbitrarily close to the solution).},
  abstract = {The aim of this paper is the study of the bisection method in $\mathbb{R}^n$. In this work we propose a multivariate bisection method supported by the Poincaré-Miranda theorem in order to solve non-linear systems of equations. Given an initial cube satisfying the hypothesis of the Poincaré-Miranda theorem, the algorithm performs congruent refinements through its center, generating a root approximation. Through preconditioning we will prove the local convergence of this new root finder methodology and, moreover, we will perform a numerical implementation for the two-dimensional case.}
}

@article{gal-gue-par-pey-20-aa-lipray,
  author = {Galin, {\'E}ric and Gu{\'e}rin, {\'E}ric and Paris, Axel and Peytavie, Adrien},
  title = {Segment Tracing using Local {Lipschitz} Bounds},
  journal = {Computer Graphics Forum},
  volume = {39},
  number = {2},
  pages = {545-554},
  year = 2020,
  month = may,
  doi = {10.1111/cgf.13951},
  comment = {Problem is accelerating ray-tracing. Uses Lipschitz bounds to compute an enclosure for the implicit object's function. Claims that it is faster than AA.},
  abstract = {We introduce Segment Tracing, a new algorithm that accelerates the classical Sphere Tracing method for computing the intersection between a ray and an implicit surface.
Our approach consists in computing the Lipschitz bound locally over a segment to improve the marching step computation and accelerate the overall process. We describe the computation of the Lipschitz bound for different operators and primitives. We demonstrate that our algorithm significantly reduces the number of field function queries compared to previous methods, without the need for additional accelerating data-structures. Our method can be applied to a vast variety of implicit models, ranging from hierarchical procedural objects built from complex primitives to simulation-generated implicit surfaces created from many particles.}
}

@article{gam-mad-07-aa-progray,
  author = {Gamito, Manuel Noronha and Maddock, Steve C.},
  title = {Progressive Refinement Rendering of Implicit Surfaces},
  journal = {Computers {\&} Graphics},
  volume = {31},
  number = {5},
  pages = {698-709},
  year = 2007,
  month = oct,
  doi = {10.1016/j.cag.2007.04.011},
  comment = {Rendering implicit surfaces that are Lipschitz continuous. Rather than ray-casting, uses a low-res approximation. Divides the image into superpixels and samples with a ray at the center of each superpixel. Uses Lipschitz bounds. Mentions AA but claims his method is better. Isn't the ZZ-buffer method better?},
  abstract = {The visualisation of implicit surfaces can be an inefficient task when such surfaces are complex and highly detailed. Visualising a surface by first converting it to a polygon mesh may lead to an excessive polygon count. Visualising a surface by direct ray casting is often a slow procedure. In this paper we present a progressive refinement renderer for implicit surfaces that are Lipschitz continuous. The renderer first displays a low resolution estimate of what the final image is going to be and, as the computation progresses, increases the quality of this estimate at an interactive frame rate. This renderer provides a quick previewing facility that significantly reduces the design cycle of a new and complex implicit surface. The renderer is also capable of completing an image faster than a conventional implicit surface rendering algorithm based on ray casting.}
}

@inproceedings{gam-mad-07-aa-proimp,
  author = {Gamito, Manuel Noronha and Maddock, Steve C.},
  title = {A Progressive Refinement Approach for the Visualisation of Implicit Surfaces},
  booktitle = {Revised Selected Papers of the International VISAPP and GRAPP Conferences},
  location = {Set{\'u}bal, PT},
  series = {Communications in Computer and Information Science},
  volume = {4},
  pages = {93-108},
  year = 2007,
  month = feb,
  doi = {10.1007/978-3-540-75274-5_6},
  comment = {Basically beam casting with AA. How does this compare with the revised ZZ-buffer stuff? Defines ``reduced affine arithmetic'', which is AA where the noise symbols are limited to those of the input variables, plus a single term for all other errors (see the sketch at the end of this file).},
  abstract = {Visualising implicit surfaces with the ray casting method is a slow procedure. The design cycle of a new implicit surface is, therefore, fraught with long latency times as a user must wait for the surface to be rendered before being able to decide what changes should be introduced in the next iteration. In this paper, we present an attempt at reducing the design cycle of an implicit surface modeler by introducing a progressive refinement rendering approach to the visualisation of implicit surfaces. This progressive refinement renderer provides a quick previewing facility.
It first displays a low quality estimate of what the final rendering is going to be and, as the computation progresses, increases the quality of this estimate at a steady rate. The progressive refinement algorithm is based on the adaptive subdivision of the viewing frustum into smaller cells. An estimate for the variation of the implicit function inside each cell is obtained with an affine arithmetic range estimation technique. Overall, we show that our progressive refinement approach not only provides the user with visual feedback as the rendering advances but is also capable of completing the image faster than a conventional implicit surface rendering algorithm based on ray casting.}
}

@article{gam-mad-08-aa-topcor,
  author = {Gamito, Manuel Noronha and Maddock, Steve C.},
  title = {Topological Correction of Hypertextured Implicit Surfaces for Ray Casting},
  journal = {The Visual Computer},
  volume = {24},
  pages = {397-409},
  year = 2008,
  month = jun,
  doi = {10.1007/s00371-008-0221-4},
  comment = {Problem is removing detached crumbs that appear when porous objects are modeled as implicit surfaces. They use AA to compute critical points of the implicit function and follow separatrix lines.},
  abstract = {Hypertextures are a useful modelling tool in that they can add three-dimensional detail to the surface of otherwise smooth objects. Hypertextures can be rendered as implicit surfaces, resulting in objects with a complex but well defined boundary. However, representing a hypertexture as an implicit surface often results in many small parts being detached from the main surface, turning an object into a disconnected set. Depending on the context, this can detract from the realism in a scene, where one usually does not expect a solid object to have clouds of smaller objects floating around it. We present a topology correction technique, integrated in a ray casting algorithm for hypertextured implicit surfaces, that detects and removes all the surface components that have become disconnected from the main surface. Our method works with implicit surfaces that are $C^2$ continuous and uses Morse theory to find the critical points of the surface. The method follows the separatrix lines joining the critical points to isolate disconnected components.}
}

@inproceedings{gao-14-aa-nsurv,
  author = {Gao, Zheng},
  title = {Numerical Program Analysis and Testing},
  booktitle = {Proceedings of the 22nd ACM SIGSOFT International Symposium on Foundations of Software Engineering (FSE)},
  location = {Hong Kong, CN},
  pages = {779-782},
  year = 2014,
  month = nov,
  doi = {10.1145/2635868.2666603},
  comment = {Survey article. Mentions and briefly describes AA.},
  abstract = {Numerical software is playing an increasingly critical role in modern society, but composing correct numerical programs is difficult. This paper describes a doctoral research program that aims to alleviate this issue. It tackles real-world problems and is guided by features learned from empirically studying these programs. By assisting developers in the production of numerical software, it improves the quality and productivity of software development.
The research depends on numerical analysis and lies in the intersection of software engineering and program analysis.}
}

@article{gao-wud-son-tin-lix-11-aa-finel,
  author = {Gao, Wei and Wu, Di and Song, Chongmin and Tin-Loi, Francis and Li, Xiaojing},
  title = {Hybrid Probabilistic Interval Analysis of Bar Structures with Uncertainty using a Mixed Perturbation {Monte-Carlo} Method},
  journal = {Finite Elements in Analysis and Design},
  volume = {47},
  number = {7},
  pages = {643-652},
  year = 2011,
  month = jul,
  doi = {10.1016/j.finel.2011.01.007},
  comment = {Reliability analysis of structures by the finite-element method. Uses Monte-Carlo and interval methods. Not clear whether it uses AA.},
  abstract = {This paper presents a mixed perturbation Monte-Carlo method for static and reliability analysis of structural systems with a mixture of random and interval parameters/loadings. Using a combination of the Taylor expansion, matrix perturbation theory and the random interval moment method, the expressions for the mean value and standard deviation of random interval structural responses are developed. The Monte-Carlo simulation method is employed to determine the lower and upper bounds of the mean values and standard deviations of structural displacements and stresses. The structural reliability is not a deterministic value but an interval, as the structural stress responses are random interval variables. The lower and upper bounds of the probability of failure and reliability of structural elements and systems are investigated based on the first-order second-moment reliability method and the interval approach. Three numerical examples are used to illustrate the effectiveness of the proposed method.}
}

@inproceedings{gar-mic-rue-20-aa-rigenc,
  author = {Garcia, Rémy and Michel, Claude and Rueher, Michel},
  title = {Rigorous Enclosure of Round-Off Errors in Floating-Point Computations},
  booktitle = {Revised Selected Papers of the 12th Working Conference on Verified Software: Theories, Tools, and Experiments (VSTTE) and of the 13th International Workshop on Software Verification (NSV)},
  location = {Los Angeles, US},
  pages = {196-212},
  year = 2020,
  month = jul,
  doi = {10.1007/978-3-030-63618-0_12},
  comment = {Describes \texttt{FErA}, a tool to obtain both witnesses and enclosures by ``rational arithmetic''. Mentions AA but does not use it?},
  abstract = {Efficient tools for error analysis of programs with floating-point computations are available. Most of them provide an over-approximation of the floating-point errors. The point is that these approximations are often too coarse to evaluate the effective impact of the error on the behaviour of a program. Some of these tools compute an under-approximation of the maximal error. But these under-approximations are either not rigorous or not reachable. In this paper, we introduce a new approach to rigorously enclose the maximal error by means of an over-approximation of the error and an under-approximation computed by means of rational arithmetic. Moreover, our system, called FErA, provides input values that exercise the under-approximations. We outline the advantages and limits of our framework and compare our approach with state-of-the-art methods for over-approximating errors as well as the ones computing under-approximations of the maximal error. Preliminary experiments on standard benchmarks are promising.
FErA not only computes good error bounds on most benchmarks but also provides an effective lower bound on the maximal error.}
}

@article{gba-nwu-bab-21-aa-harm,
  author = {Gbadamosi, Saheed Lekan and Nwulu, Nnamdi and Babatunde, Olubayo Moses},
  title = {Harmonic Estimation on a Transmission System with Large-Scale Renewable Energy Sources},
  journal = {Przegl{\k{a}}d Elektrotechniczny},
  volume = {97},
  number = {4},
  pages = {146-151},
  year = 2021,
  month = jun,
  doi = {10.15199/48.2021.04.26},
  comment = {Mentions complex affine arithmetic in related works but does not use it?},
  abstract = {This paper presents a modelling and simulation approach using the Electrical Transient Analyzer Program software to evaluate the magnitude and effects of harmonics from varying RES into the transmission system. An analytical technique was developed to estimate and quantify the harmonic power flow and losses amplification on the transmission lines. The efficiency of the proposed approach is demonstrated on the non-distorted Garver 6-bus and IEEE 24-bus test systems. The developed technique can quantitatively estimate harmonic contributions from RES.}
}

@article{gel-liu-yan-zhu-zha-liy-21-aa-carbon,
  author = {Ge, Leijiao and Liu, Hangxu and Yan, Jun and Zhu, Xinshan and Zhang, Shuai and Li, Yuanzheng},
  title = {Optimal Integrated Energy System Planning with {DG} Uncertainty Affine Model and Carbon Emissions Charges},
  journal = {IEEE Transactions on Sustainable Energy},
  volume = {13},
  number = {2},
  pages = {905-918},
  year = 2021,
  month = apr,
  doi = {10.1109/TSTE.2021.3139109},
  comment = {Combines AA with particle swarm optimization to compute power flow etc. on a power grid with distributed generators.},
  abstract = {Integrated energy systems (IES) with cooling, heat, electricity, and natural gas have drawn significant interest recently as we embrace more sustainable energy amidst climate change. However, the uncertain outputs of distributed generators (DGs) make it challenging for IES planning while maintaining low-cost installation and operation under carbon emission constraints. To tackle the challenge, this work proposes an optimal planning model for IES considering both DG output uncertainties and carbon emission punishments. To reduce the conservatism of the widely-adopted interval and affine algorithms, an affine model based on the matrix form is first proposed to model the uncertain DG outputs. A tiered dynamic charging cost model is further developed to introduce and minimize carbon emissions with a punishment mechanism at the planning stage. Based on these two sub-models, an optimal IES planning model is proposed to simultaneously minimize the overall costs of investment, operation, and carbon emissions. To solve the multi-dimensional nonlinear model, an improved quantum particle swarm optimization (IQPSO) algorithm is introduced with enhanced global optimization ability.
Simulation results on the IEEE 33-bus and 69-bus IES networks have demonstrated that the proposed method can effectively reduce the impacts of DG uncertainty and carbon emissions at the planning stage of IES with a better long-term economy.}
}

@article{gel-liy-liy-yan-sun-22-aa-smart,
  author = {Ge, Leijiao and Li, Yuanliang and Li, Yuanliang and Yan, Jun and Sun, Yonghui},
  title = {Smart Distribution Network Situation Awareness for High-Quality Operation and Maintenance: {A} Brief Review},
  journal = {Energies},
  volume = {15},
  number = {3},
  pages = {828},
  year = 2022,
  month = jan,
  doi = {10.3390/en15030828},
  comment = {Survey of tools for power flow etc. Brief mention of AA.},
  abstract = {In order to meet the requirements of high-tech enterprises for high power quality, high-quality operation and maintenance (O&M) in smart distribution networks (SDN) is becoming increasingly important. As a significant element in enhancing the high-quality O&M of SDN, situation awareness (SA) began to excite the significant interest of scholars and managers, especially after the integration of intermittent renewable energy into SDN. Specific to high-quality O&M, the paper decomposes SA into three stages: detection, comprehension, and projection. In this paper, the state-of-the-art knowledge of SDN SA is discussed, a review of critical technologies is presented, and a five-layer visualization framework of SDN SA is constructed. SA detection aims to improve the SDN observability, SA comprehension is associated with the SDN operating status, and SA projection pertains to the analysis of the future SDN situation. The paper can provide researchers and utility engineers with insights into the technical achievements, barriers, and future research directions of SDN SA.}
}

@mastersthesis{dem-20-aa-matlib,
  author = {Demchenko, Dmitry Georgievich},
  title = {Software Library based on Affine Arithmetic for Mathematical Processing of Measurement Results},
  school = {St. Petersburg Polytechnic University},
  pages = {??},
  year = 2020,
  doi = {10.18720/SPBPU/3/2020/vr/vr20-3728},
  url = {https://elib.spbstu.ru/dl/3/2020/vr/vr20-3728.pdf/info},
  note = {Bachelor's thesis, in Russian. Advisor: Konstantin Konstantinovich Semenov},
  comment = {Develops a MATLAB library \texttt{AffInt} implementing AA.},
  abstract = {This work is devoted to the development of a software library that implements affine arithmetic for the needs of supporting calculations with inaccurate data, by automatically estimating the error characteristics of calculation results, for the MATLAB development environment. Tasks that were solved in the course of the work: 1) Analysis of existing modifications of interval arithmetic. 2) Study of existing library implementations for working with intervals. 3) Formulation of the basic requirements taken into account when developing the library for the needs of mathematical processing of measurement results. 4) Obtaining methods and algorithms for working with affine intervals. 5) Development of a library that implements all the basic necessary functions and operations when working with affine intervals. 6) An experimental study of the developed library. The analysis of existing software implementations of affine arithmetic is carried out. The technology of creating software packages in the MATLAB environment is studied. A software package was developed that provides the ability to automate work with inaccurate data, including measurement results, using affine arithmetic. A software library called ``AffInt'' has been developed.
Experimental studies of the developed software were conducted. Experimental confirmations of the correctness of the developed library were obtained, as well as temporal characteristics of the speed of calculations performed with its help.}
}

@inproceedings{ghe-mir-van-cat-ver-00-addropt-junk,
  author = {Ghez, Claude and Miranda, Miguel and Vandecappelle, Arnout and Catthoor, Francky and Verkest, Diederik},
  title = {Systematic High-Level Address Code Transformations for Piece-Wise Linear Indexing: {Illustration} on a Medical Imaging Algorithm},
  booktitle = {Proceedings of the IEEE Workshop on Signal Processing Systems (SiPS) - Design and Implementation},
  location = {Lafayette, US},
  pages = {603-612},
  year = 2000,
  month = oct,
  doi = {10.1109/SIPS.2000.886758},
  comment = {Bogus Google Scholar hit. The goal is to improve code execution efficiency for some complicated ``piecewise linear indexing'', reducing it to ``linear pointer arithmetic'' and avoiding integer div and mod operations. ``Affine arithmetic'' here is just indexing expressions of the form $ak + b$.},
  abstract = {Exploring data transfer and storage issues is crucial to efficiently map data intensive applications (e.g., multimedia) onto programmable processors. Code transformations are used to minimise main memory bus load and hence also power and system performance. However, this typically incurs a considerable arithmetic overhead in the addressing and local control. For instance, memory optimising in-place and data-layout transformations add costly modulo and integer division operations to the initial addressing code. In this paper, we show how the cycle overhead can be almost completely removed. This is done according to a systematic methodology which combines an algebraic transformation exploration approach for the (non)linear arithmetic with an efficient transformation technique for reducing the piece-wise linear indexing to linear pointer arithmetic. The approach is illustrated on a real-life medical application, using a variety of programmable processor architectures. Total gains in cycle count ranging between a factor of 5 and 25 are obtained compared to conventional compilers.}
}

@phdthesis{gho-11-aa-constr-th,
  author = {Ghorbal, Khalil},
  title = {Static Analysis of Numerical Programs: {Constrained} Affine Sets Abstract Domain},
  school = {{\'E}cole Polytechnique},
  pages = {170},
  year = 2011,
  month = nov,
  note = {Advisors: Sylvie Putot and {\'E}ric Goubault},
  url = {https://pastel.archives-ouvertes.fr/pastel-00643442},
  comment = {Important. Introduces a new affine forms-based abstract domain, called ``constrained affine sets'', which extends and generalizes an already existing abstract domain introduced by Goubault and Putot. Discusses the problem of normalizing a vector (specifically, a quaternion) without the high loss of precision caused by the non-linear operations (squaring, square root, division). Notes that \texttt{Fluctuat} allows constraints on the noises, which are propagated in the computations. Explains the ``perturbed affine arithmetic'' of Goubault and Putot, including the ``lesser than'' ordering, which is stronger than the mere containment of the zonotopes.},
  abstract = {We aim at proving automatically the correctness of the numerical behavior of a program by inferring invariants on numerical variables. More precisely, we over-approximate in a sound manner the set of reached values.
We use Abstract Interpretation-based Static Analysis as a generic framework to define and approximate the semantics of a program in a unified manner. The semantics that describes the real behavior of the program (concrete semantics) is in general undecidable. Abstract interpretation offers a way to abstract this concrete semantics to obtain a decidable semantics involving machine-expressible objects. We introduce a new affine forms-based abstract domain, called constrained affine sets, which extends and generalizes an already existing abstract domain introduced by Eric Goubault and Sylvie Putot. The expressiveness of this new domain is enhanced thanks to its ability to encode and propagate linear constraints among variables. We have implemented our new domain to experiment with the precision and the efficiency of our approach and compare our results to the already existing abstract domains. The theoretical work as well as the implementation and the experiments have been the subject of two publications.}
}

@inproceedings{gho-dug-kah-iva-12-aa-probmod,
  author = {Ghorbal, Khalil and Duggirala, Parasara Sridhar and Kahlon, Vineet and Ivan{\v{c}}i{\'c}, Franjo and Gupta, Aarti},
  title = {Efficient Probabilistic Model Checking of Systems with Ranged Probabilities},
  booktitle = {Proceedings of the 6th International Workshop on Reachability Problems (RP)},
  location = {Bordeaux, FR},
  series = {Lecture Notes in Computer Science},
  volume = {7550},
  pages = {107-120},
  year = 2012,
  month = sep,
  doi = {10.1007/978-3-642-33512-9_10},
  comment = {Uses AA to propagate linear terms and IA to handle the non-linear ones. Why is that different from, or better than, just AA?},
  abstract = {We introduce a new technique to model check reachability properties on Interval Discrete-Time Markov Chains (IDTMC). We compute a sound over-approximation of the probabilities of satisfying a given property, where the accuracy is characterized in terms of error bounds. We leverage affine arithmetic to propagate the first-order error terms. Higher-order error terms are bounded using interval arithmetic.}
}

@inproceedings{gho-gou-put-09-aa-tayzon,
  author = {Ghorbal, Khalil and Goubault, {\'E}ric and Putot, Sylvie},
  title = {The Zonotope Abstract Domain {Taylor}1$+$},
  booktitle = {Proceedings of the 21st International Conference on Computer Aided Verification (CAV)},
  location = {Grenoble, FR},
  series = {Lecture Notes in Computer Science},
  volume = {5643},
  pages = {627-633},
  year = 2009,
  month = jun,
  doi = {10.1007/978-3-642-02658-4_47},
  comment = {Uses AA to do static analysis of ranges of variables in programs. Is AA the ``Taylor $1+$'' arithmetic?},
  abstract = {Static analysis by abstract interpretation [1] aims at automatically inferring properties on the behaviour of programs. We focus here on a specific kind of numerical invariants: the set of values taken by numerical variables, with a real numbers semantics, at each control point of a program.}
}

@inproceedings{gho-gou-put-10-aa-zonint,
  author = {Ghorbal, Khalil and Goubault, {\'E}ric and Putot, Sylvie},
  title = {A Logical Product Approach to Zonotope Intersection},
  booktitle = {Proceedings of the 22nd International Conference on Computer Aided Verification (CAV)},
  location = {Edinburgh, UK},
  series = {Lecture Notes in Computer Science},
  volume = {6174},
  pages = {212-226},
  year = 2010,
  month = jul,
  doi = {10.1007/978-3-642-14295-6_22},
  comment = {Concerned with reachability analysis in hybrid systems.
Defines and studies ``a new abstract domain which is a fine-grained combination of zonotopes with (sub-)polyhedric domains such as the interval, octagon, linear template or polyhedron domains''. Claims that they are able to interpret tests (i.e. intersections) efficiently. Describes the \texttt{APRON} library.},
  abstract = {We define and study a new abstract domain which is a fine-grained combination of zonotopes with (sub-)polyhedric domains such as the interval, octagon, linear template or polyhedron domains. While abstract transfer functions are still rather inexpensive and accurate even for interpreting non-linear computations, we are able to also interpret tests (i.e. intersections) efficiently. This fixes a known drawback of zonotopic methods, as used for reachability analysis for hybrid systems as well as for invariant generation in abstract interpretation: intersections of zonotopes are not always zonotopes, and there is not even a best zonotopic over-approximation of the intersection. We describe some examples and an implementation of our method in the APRON library, and discuss some further interesting combinations of zonotopes with non-linear or non-convex domains such as quadratic templates and maxplus polyhedra.}
}

@phdthesis{gil-11-aa-cybphy-th,
  author = {Gilberti, Michael},
  title = {Networked Adaptive Classification for Cyber-Physical Systems},
  school = {Stony Brook University},
  pages = {89},
  year = 2011,
  month = sep,
  url = {http://hdl.handle.net/1951/52344},
  note = {Advisor: Alex Doboli. Date on cover page is Dec/2009. No DOI?},
  comment = {Considers chemical gas cloud localization by a distributed network. Describes various systems that use AA, but it is not clear whether he uses them himself.},
  abstract = {Cyber-physical Systems (CPSs) are networks of embedded systems which operate under tight power and timing constraints and with varying levels of precision. The primary goal is to meet the above requirements for an application that is distributed over a network. Adaptive classification is introduced in the context of a chemical gas cloud localization application running over the CPS. Standard machine learning binary classifiers are employed and optimized for implementation in reconfigurable hardware at varying precisions, power requirements and speeds. An exemplar set of classifiers contains one chemical species classifier and four individual chemical classifiers, such as alcohol versus the rest (of the species) and methyl alcohol versus the rest (of the individual alcohols). Other sets classify additional species and chemicals. The network is populated with these classifiers by assigning one set of chemical classifiers per node, and techniques from the field of evolutionary game theory are applied to allow the nodes to self-configure over time.}
}

@inproceedings{gil-rad-17-aa-robcon,
  author = {Gil, Leandro and Radetzki, Martin},
  title = {Semi-Symbolic Operational Computation for Robust Control System Design},
  booktitle = {Proceedings of the 22nd International Conference on Methods and Models in Automation and Robotics (MMAR)},
  location = {Miedzyzdroje, PL},
  pages = {779-784},
  year = 2017,
  month = aug,
  doi = {10.1109/MMAR.2017.8046927},
  comment = {Not clear whether it is related to, or an improvement of, AA. May be special handling of multiplication.},
  abstract = {Semi-symbolic simulation is becoming popular for the inclusion of parameter uncertainties in system design analysis.
For robust control system design optimization, computational methods enabling fast semi-symbolic simulations are necessary. We propose an operational computation method based on orthogonal signals that is faster than step integration methods and allows the direct evaluation of system robustness to parameter variations. In order to improve the simulation performance during design optimization, we derive a novel operational method to compute the multiplication of signal expansions. Thus, common nonlinear cost functions can be directly computed, using only signal coefficients. The evaluation of signals during the optimization is avoided by this method, which is a significant advantage compared to other known approaches for dynamic system simulation. We validate the capability of our design methodology for the improvement of system performance and robustness by optimizing a DC motor control. The obtained results show that affine arithmetic computations are well suited for robust control system design optimization in the time domain.}
}

@article{gom-14-aa-curvdr,
  author = {Gomes, Abel J. P.},
  title = {A Continuation Algorithm for Planar Implicit Curves with Singularities},
  journal = {Computers {\&} Graphics},
  volume = {38},
  pages = {365-373},
  year = 2014,
  month = feb,
  doi = {10.1016/j.cag.2013.11.006},
  comment = {Mentions AA but it is not clear whether it uses it.},
  abstract = {Continuation algorithms usually behave badly near critical points of implicitly defined curves in $\mathbb{R}^2$, i.e., points at which at least one of the partial derivatives vanishes. Critical points include turning points, self-intersections, and isolated points. Another problem with this family of algorithms is their inability to render curves with multiple components, because that requires first finding a seed point on each of them. This paper details an algorithm that resolves these two major problems in an elegant manner. In fact, it allows us not only to march along a curve even in the presence of critical points, but also to detect and render curves with multiple components using the theory of critical points.}
}
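# Appendix: a minimal sketch (mine, not taken from any of the papers above) of the
# ``reduced affine arithmetic'' described in the comment of [gam-mad-07-aa-proimp]:
# an affine form x0 + x1*e1 + ... + xn*en + err*u with |u| <= 1, where the noise
# symbols e1..en are reserved for the n input variables and every other error
# source (linearization, lost correlations) is lumped into the single residual
# magnitude err. All names below (RAffine, range, ...) are hypothetical; Python
# is used only for illustration, and FP rounding of the coefficients is ignored.

class RAffine:
    """Reduced affine form: x0 + sum(xs[i]*e[i]) + err*[-1,1]."""

    def __init__(self, x0, partials, err=0.0):
        self.x0 = x0              # central value
        self.xs = list(partials)  # coefficients of the fixed input noise symbols
        self.err = abs(err)       # magnitude of the lumped residual term

    def __add__(self, other):
        # Addition is exact in real arithmetic: add coefficients termwise;
        # the residual magnitudes simply accumulate.
        return RAffine(self.x0 + other.x0,
                       [a + b for a, b in zip(self.xs, other.xs)],
                       self.err + other.err)

    def __mul__(self, other):
        # Keep the affine part of the product; the quadratic cross terms and
        # the products involving the residuals are bounded conservatively and
        # dumped into the single residual, so no new noise symbols appear.
        ra = sum(map(abs, self.xs)) + self.err    # total deviation of self
        rb = sum(map(abs, other.xs)) + other.err  # total deviation of other
        return RAffine(self.x0 * other.x0,
                       [self.x0 * b + other.x0 * a
                        for a, b in zip(self.xs, other.xs)],
                       abs(self.x0) * other.err + abs(other.x0) * self.err + ra * rb)

    def range(self):
        # Guaranteed enclosure of all values the form can take.
        r = sum(map(abs, self.xs)) + self.err
        return (self.x0 - r, self.x0 + r)

# Example: x in [1,3] (symbol e1), y in [-1,1] (symbol e2); evaluate x*y + x.
x = RAffine(2.0, [1.0, 0.0])  # 2 + e1
y = RAffine(0.0, [0.0, 1.0])  # 0 + e2
print((x * y + x).range())    # (-2.0, 6.0): encloses the exact range [0, 6]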