
Ben Feinberg
Senior Member of Technical Staff, Sandia National Laboratories
I am a researcher in the Scalable Computer Architectures department at Sandia National Laboratories. My research focuses on memory-centric and analog accelerators for HPC and ML applications, and energy-efficient and reliable architectures for autonomous systems.
Research interests: analog accelerators, edge/autonomous systems architectures, memory-centric architectures with emerging memories
Education
- Ph.D. Electrical Engineering, University of Rochester, 2019
- M.S. Electrical Engineering, University of Rochester, 2014
- B.S. Electrical Engineering, University of Rochester, 2012
Selected Papers
@inproceedings{wong2026darthpum,
  author    = {Wong, Ryan and Feinberg, Ben and Ghose, Saugata},
  title     = {{DARTH-PUM}: A Hybrid Processing-Using-Memory Architecture},
  booktitle = {International Conference on Architectural Support for Programming Languages and Operating Systems (ASPLOS)},
  year      = {2026},
  month     = mar,
  address   = {Pittsburgh, PA, USA},
  doi       = {10.1145/3779212.3790151},
}
@article{feinberg2026letanalog,
  author  = {Feinberg, Ben and Xiao, T. Patrick and Bennett, Christopher H. and Agarwal, Sapan},
  title   = {Let Analog Be Analog: Principles for Designing Analog Accelerators},
  journal = {IEEE Micro},
  year    = {2026},
  pages   = {1--10},
  doi     = {10.1109/MM.2026.3666132},
}
@inproceedings{wong2025anvil,
  author    = {Wong, Ryan and Kim, Nikita and Das, Aniket and Higgs, Kevin and Ipek, Engin and Agarwal, Sapan and Ghose, Saugata and Feinberg, Ben},
  title     = {{ANVIL}: An In-Storage Accelerator for Name--Value Data Stores},
  booktitle = {International Symposium on Computer Architecture (ISCA)},
  year      = {2025},
  month     = jun,
  address   = {Tokyo, Japan},
  doi       = {10.1145/3695053.3731000},
}
@misc{xiao2024analogfft,
  author        = {Xiao, T. Patrick and Feinberg, Ben and Richardson, David K. and Cannon, Matthew and Madsen, Calvin and Medu, Harsha and Agrawal, Vineet and Marinella, Matthew J. and Agarwal, Sapan and Bennett, Christopher H.},
  title         = {Analog Fast {Fourier} Transforms for Scalable and Efficient Signal Processing},
  howpublished  = {arXiv preprint arXiv:2409.19071},
  eprint        = {2409.19071},
  archiveprefix = {arXiv},
  year          = {2024},
  doi           = {10.48550/arXiv.2409.19071},
}
@article{xiao2022accuracy,
  author  = {Xiao, T. Patrick and Feinberg, Ben and Bennett, Christopher H. and Prabhakar, Venkatraman and Saxena, Prashant and Agrawal, Vineet and Agarwal, Sapan and Marinella, Matthew J.},
  title   = {On the Accuracy of Analog Neural Network Inference Accelerators},
  journal = {IEEE Circuits and Systems Magazine},
  year    = {2022},
  volume  = {22},
  number  = {4},
  pages   = {26--48},
  doi     = {10.1109/MCAS.2022.3214409},
}
@article{xiao2022sonos,
  author  = {Xiao, T. Patrick and Feinberg, Ben and Bennett, Christopher H. and Agrawal, Vineet and Saxena, Prashant and Prabhakar, Venkatraman and Ramkumar, Krishnaswamy and Medu, Harsha and Raghavan, Vijay and Chettuvetty, Ramesh and Agarwal, Sapan and Marinella, Matthew J.},
  title   = {An Accurate, Error-Tolerant, and Energy-Efficient Neural Network Inference Engine Based on {SONOS} Analog Memory},
  journal = {IEEE Transactions on Circuits and Systems I: Regular Papers},
  year    = {2022},
  volume  = {69},
  number  = {4},
  pages   = {1480--1493},
  doi     = {10.1109/TCSI.2021.3134313},
}
@inproceedings{feinberg2021preconditioner,
  author    = {Feinberg, Ben and Wong, Ryan and Xiao, T. Patrick and Bennett, Christopher H. and Rohan, Jacob N. and Boman, Erik G. and Marinella, Matthew J. and Agarwal, Sapan and Ipek, Engin},
  title     = {An Analog Preconditioner for Solving Linear Systems},
  booktitle = {International Symposium on High-Performance Computer Architecture (HPCA)},
  year      = {2021},
  month     = feb,
  address   = {Virtual},
  doi       = {10.1109/HPCA51647.2021.00069},
}
@inproceedings{feinberg2020commutative,
  author    = {Feinberg, Ben and Heyman, Benjamin C. and Mikhailenko, Darya and Wong, Ryan and Ho, An C. and Ipek, Engin},
  title     = {Commutative Data Reordering: A New Technique to Reduce Data Movement Energy on Sparse Inference Workloads},
  booktitle = {International Symposium on Computer Architecture (ISCA)},
  year      = {2020},
  month     = jun,
  address   = {Virtual},
  doi       = {10.1109/ISCA45697.2020.00091},
}
@inproceedings{feinberg2018enabling,
  author    = {Feinberg, Ben and Vengalam, Uday Kumar Reddy and Whitehair, Nathan and Wang, Shibo and Ipek, Engin},
  title     = {Enabling Scientific Computing on Memristive Accelerators},
  booktitle = {International Symposium on Computer Architecture (ISCA)},
  year      = {2018},
  month     = jun,
  address   = {Los Angeles, CA, USA},
  doi       = {10.1109/ISCA.2018.00039},
}
@inproceedings{feinberg2018making,
  author    = {Feinberg, Ben and Wang, Shibo and Ipek, Engin},
  title     = {Making Memristive Neural Network Accelerators Reliable},
  booktitle = {International Symposium on High-Performance Computer Architecture (HPCA)},
  year      = {2018},
  month     = feb,
  address   = {Vienna, Austria},
  doi       = {10.1109/HPCA.2018.00015},
}
Software
CrossSim
CrossSim is a GPU-accelerated accuracy simulator and co-design tool for analog in-memory computing. It models how hardware non-idealities in resistive crossbar arrays — programming errors, conductance drift, read noise, ADC quantization, and parasitic resistance — affect algorithm accuracy across neural network inference, signal processing, and linear algebra workloads. CrossSim provides a NumPy-like API, interfaces for PyTorch and Keras models, and supports hardware-aware training.
SST Elements
The Structural Simulation Toolkit (SST) is a modular parallel simulation framework for exploring innovations in highly concurrent systems, including processors, memory hierarchies, and network interconnects. Within SST Elements, I maintain Golem, a simulation model for analog accelerator tiles, and contribute to Carcosa, a component for modeling heterogeneous compute architectures.