Commit
add SFGE, clarify selected
bamos committed Jun 18, 2024
1 parent ea6f89e commit 3f0439a
Showing 3 changed files with 13 additions and 2 deletions.
11 changes: 11 additions & 0 deletions publications/all.bib
@@ -40,6 +40,17 @@ @misc{sambharya2024learning
}
}

@misc{silvestri2024score,
title={Score Function Gradient Estimation to Widen the Applicability of Decision-Focused Learning},
author={Mattia Silvestri and Senne Berden and Jayanta Mandi and Ali İrfan Mahmutoğulları and Brandon Amos and Tias Guns and Michele Lombardi},
year={2024},
url={https://arxiv.org/abs/2307.05213},
_venue={arXiv},
abstract={
Many real-world optimization problems contain parameters that are unknown before deployment time, either due to stochasticity or to lack of information (e.g., demand or travel times in delivery problems). A common strategy in such cases is to estimate said parameters via machine learning (ML) models trained to minimize the prediction error, which however is not necessarily aligned with the downstream task-level error. The decision-focused learning (DFL) paradigm overcomes this limitation by training to directly minimize a task loss, e.g. regret. Since the latter has non-informative gradients for combinatorial problems, state-of-the-art DFL methods introduce surrogates and approximations that enable training. But these methods exploit specific assumptions about the problem structures (e.g., convex or linear problems, unknown parameters only in the objective function). We propose an alternative method that makes no such assumptions, it combines stochastic smoothing with score function gradient estimation which works on any task loss. This opens up the use of DFL methods to nonlinear objectives, uncertain parameters in the problem constraints, and even two-stage stochastic optimization. Experiments show that it typically requires more epochs, but that it is on par with specialized methods and performs especially well for the difficult case of problems with uncertainty in the constraints, in terms of solution quality, scalability, or both.
}
}

@misc{amos2023tutorial,
title={Tutorial on amortized optimization},
author={Brandon Amos},
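As context for the SFGE entry added above: the score-function (REINFORCE) gradient estimator that the abstract refers to rewrites the gradient of an expected task loss as ∇θ E_{z∼pθ}[L(z)] = E_{z∼pθ}[L(z) ∇θ log pθ(z)], so no gradient ever has to flow through the (possibly combinatorial) solver. The sketch below is a minimal illustration of that identity only, not the paper's implementation; the Gaussian smoothing distribution, the toy argmin "solver", and the regret-style loss are assumptions chosen for brevity.

```python
# Minimal sketch of score-function (REINFORCE) gradient estimation for a
# downstream task loss, in the spirit of the SFGE abstract above. The
# Gaussian smoothing distribution, toy argmin "solver", and regret-style
# loss are illustrative assumptions, not the paper's implementation.
import torch

true_costs = torch.tensor([3.0, 1.0, 4.0, 1.5, 5.0])  # ground-truth costs

def solve(costs):
    # Non-differentiable decision: pick the option with the lowest predicted cost.
    return torch.argmin(costs)

def task_loss(decision):
    # Regret: realized cost of the chosen decision minus the best possible cost.
    return true_costs[decision] - true_costs.min()

theta = torch.zeros(5, requires_grad=True)  # predicted cost parameters
opt = torch.optim.SGD([theta], lr=0.1)
sigma = 0.5  # smoothing noise scale

for _ in range(200):
    dist = torch.distributions.Normal(theta, sigma)
    z = dist.sample()                      # smoothed sample of the costs
    loss = task_loss(solve(z))             # scalar; no gradient path to theta
    # Score-function surrogate: loss * log p_theta(z); its gradient is an
    # unbiased estimate of the gradient of the expected task loss.
    surrogate = loss.detach() * dist.log_prob(z).sum()
    opt.zero_grad()
    surrogate.backward()
    opt.step()
```

Because only the log-probability term depends on theta, the backward pass never needs derivatives of solve or task_loss, which is what lets this style of estimator handle nonlinear objectives and uncertainty in the constraints.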
2 changes: 1 addition & 1 deletion templates/latex/sections/all_publications.tex
@@ -10,7 +10,7 @@


\setlength\fboxsep{2pt}
-Selected publications are \hspace{-\fboxsep}\colorbox{tab_highlight}{highlighted.}
+Selected publications I am a primary author on are \hspace{-\fboxsep}\colorbox{tab_highlight}{highlighted.}
% [\href{https://github.com/bamos/cv/blob/master/publications/<< content.file >>}{BibTeX}]

% [<a href="https://scholar.google.com/citations?user={{ scholar_id }}">Google Scholar</a>]
2 changes: 1 addition & 1 deletion templates/markdown/sections/all_publications.md
@@ -1,7 +1,7 @@
## <i class="fa fa-chevron-right"></i> Publications

<!-- [<a href="https://github.com/bamos/cv/blob/master/publications/{{ content.file }}">BibTeX</a>] -->
-Selected publications are <span style='background-color: #ffffd0'>highlighted.</span>
+Selected publications I am a primary author on are <span style='background-color: #ffffd0'>highlighted.</span>
<!-- {{ summary }} -->
<br>
[<a href="https://scholar.google.com/citations?user={{ scholar_id }}">Google Scholar</a>: {{ scholar_stats.citations }} citations and an h-index of {{ scholar_stats.h_index}}.]
