@@ -38,6 +38,8 @@ template <int N> class LBFGS {
3838 int mem_size_ = 10; // number of vectors used for approximating the objective Hessian
3939 Eigen::Matrix<double , Dynamic, Dynamic> grad_mem_, x_mem_;
4040 public:
41+ static constexpr bool gradient_free = false;
42+ static constexpr int static_input_size = N;
4143 vector_t x_old, x_new, update, grad_old, grad_new;
4244 double h;
4345 // constructors
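
The two compile-time traits added above (`gradient_free` and `static_input_size`) let generic calling code query an optimizer's requirements without instantiating it. Below is a minimal sketch of how such a query might look; the `minimize` driver is a hypothetical helper, not part of fdaPDE, and it assumes the `optimize(objective, x0)` entry point visible in the removed block further down.

```cpp
// Hypothetical driver, for illustration only: dispatches on the traits the
// optimizer type declares. LBFGS exposes gradient_free == false and
// static_input_size == N, so the objective must provide a gradient() and the
// initial guess must match the (possibly Dynamic) compile-time size.
#include <Eigen/Dense>
#include <utility>

template <typename Optimizer, typename Objective>
auto minimize(Optimizer& opt, Objective&& obj,
              const Eigen::Matrix<double, Optimizer::static_input_size, 1>& x0) {
    static_assert(!Optimizer::gradient_free,
                  "this driver targets gradient-based optimizers such as LBFGS");
    return opt.optimize(std::forward<Objective>(obj), x0);
}
```

Exposing the traits as `static constexpr` members (rather than run-time queries) presumably keeps this kind of dispatch purely compile-time, e.g. via `if constexpr` between gradient-based and derivative-free branches.
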
@@ -127,184 +129,3 @@ template <int N> class LBFGS {
127129} // namespace fdapde
128130
129131#endif // __FDAPDE_LBFGS_H__
130-
131-
132- // This file is part of fdaPDE, a C++ library for physics-informed
133- // spatial and functional data analysis.
134- //
135- // This program is free software: you can redistribute it and/or modify
136- // it under the terms of the GNU General Public License as published by
137- // the Free Software Foundation, either version 3 of the License, or
138- // (at your option) any later version.
139- //
140- // This program is distributed in the hope that it will be useful,
141- // but WITHOUT ANY WARRANTY; without even the implied warranty of
142- // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
143- // GNU General Public License for more details.
144- //
145- // You should have received a copy of the GNU General Public License
146- // along with this program. If not, see <http://www.gnu.org/licenses/>.
147-
148- // #ifndef __FDAPDE_LBFGS_H__
149- // #define __FDAPDE_LBFGS_H__
150-
151- // #include "header_check.h"
152-
153- // namespace fdapde {
154-
155- // // Implementation of Broyden–Fletcher–Goldfarb–Shanno algorithm for unconstrained nonlinear optimization with optimized
156- // // memory usage
157- // template <int N, typename... Args> class LBFGS {
158- // private:
159- // using vector_t = std::conditional_t<N == Dynamic, Eigen::Matrix<double, Dynamic, 1>, Eigen::Matrix<double, N, 1>>;
160- // using matrix_t =
161- // std::conditional_t<N == Dynamic, Eigen::Matrix<double, Dynamic, Dynamic>, Eigen::Matrix<double, N, N>>;
162-
163- // std::tuple<Args...> callbacks_;
164- // vector_t optimum_;
165- // double value_; // objective value at optimum
166- // int max_iter_; // maximum number of iterations before forced stop
167- // int n_iter_ = 0; // current iteration number
168- // double tol_; // tolerance on error before forced stop
169- // double step_; // update step
170- // int mem_size_ = 10; // number of vector used for approximating the objective hessian
171- // Eigen::Matrix<double, Dynamic, Dynamic> grad_mem_, x_mem_;
172- // public:
173- // vector_t x_old, x_new, update, grad_old, grad_new;
174- // double h;
175- // // constructors
176- // LBFGS() = default;
177- // LBFGS(int max_iter, double tol, double step, int mem_size)
178- // requires(sizeof...(Args) != 0) : max_iter_(max_iter), tol_(tol), step_(step), mem_size_(mem_size) {
179- // fdapde_assert(mem_size_ >= 0);
180- // }
181- // LBFGS(int max_iter, double tol, double step, int mem_size, Args&&... callbacks) :
182- // callbacks_(std::make_tuple(std::forward<Args>(callbacks)...)),
183- // max_iter_(max_iter),
184- // tol_(tol),
185- // step_(step),
186- // mem_size_(mem_size) {
187- // assert(mem_size_ >= 0);
188- // }
189- // // copy semantic
190- // LBFGS(const LBFGS& other) :
191- // callbacks_(other.callbacks_),
192- // max_iter_(other.max_iter_), tol_(other.tol_), step_(other.step_), mem_size_(other.mem_size_) { }
193- // LBFGS& operator=(const LBFGS& other) {
194- // callbacks_ = other.callbacks_;
195- // max_iter_ = other.max_iter_;
196- // tol_ = other.tol_;
197- // step_ = other.step_;
198- // mem_size_ = other.mem_size_;
199- // return *this;
200- // }
201-
202- // template <typename ObjectiveT, typename... Functor>
203- // requires(sizeof...(Functor) < 2) && ((requires(Functor f, double value) { f(value); }) && ...)
204- // vector_t optimize(ObjectiveT&& objective, const vector_t& x0, Functor&&... func) {
205- // fdapde_static_assert(
206- // std::is_same<decltype(std::declval<ObjectiveT>().operator()(vector_t())) FDAPDE_COMMA double>::value,
207- // INVALID_CALL_TO_OPTIMIZE__OBJECTIVE_FUNCTOR_NOT_ACCEPTING_VECTORTYPE);
208- // bool stop = false; // asserted true in case of forced stop
209- // double error = 0;
210- // double gamma = 1.0;
211- // auto grad = objective.gradient();
212- // n_iter_ = 0;
213- // h = step_;
214- // x_old = x0, x_new = x0;
215- // vector_t zero;
216- // if constexpr (N == Dynamic) {
217- // zero = vector_t::Zero(x0.rows());
218- // } else {
219- // zero = vector_t::Zero();
220- // }
221- // update = zero;
222- // grad_old = grad(x_old);
223- // if (grad_old.isApprox(zero)) { // already at stationary point
224- // optimum_ = x_old;
225- // value_ = objective(optimum_);
226- // if constexpr (sizeof...(Functor) == 1) { (func(value_), ...); }
227- // return optimum_;
228- // }
229- // error = grad_old.norm();
230- // x_mem_.resize(x0.rows(), mem_size_);
231- // grad_mem_.resize(x0.rows(), mem_size_);
232-
233- // while (n_iter_ < max_iter_ && error > tol_ && !stop) {
234- // // compute update direction
235- // vector_t q = grad_old;
236- // int current_mem = n_iter_ < mem_size_ ? n_iter_ : mem_size_;
237- // std::vector<double> alpha(current_mem, 0);
238- // for (int i = 0; std::cmp_less(i, current_mem); ++i) {
239- // int k = (n_iter_ + mem_size_ - i - 1) % mem_size_;
240- // alpha[i] = x_mem_.col(k).dot(q) / grad_mem_.col(k).dot(x_mem_.col(k));
241- // q -= alpha[i] * grad_mem_.col(k);
242- // std::cout << "aggiorno q" << std::endl;
243- // }
244- // // H_0^k = I (initial guess of the inverse hessian)
245- // std::cout << "q: " << q.transpose() << std::endl;
246- // std::cout << "gamma: " << gamma << std::endl;
247-
248- // update = -gamma * q;
249-
250- // for (int i = current_mem - 1; i >= 0; --i) {
251- // int k = (n_iter_ + mem_size_ - i - 1) % mem_size_;
252- // double beta = grad_mem_.col(k).dot(update) / grad_mem_.col(k).dot(x_mem_.col(k));
253- // update -= x_mem_.col(k) * (alpha[i] + beta);
254- // }
255-
256- // std::cout << update << std::endl;
257- // std::cout << "----" << std::endl;
258-
259-
260- // stop |= internals::exec_adapt_hooks(*this, objective, callbacks_);
261- // // update along descent direction
262- // std::cout << "h: " << h << std::endl;
263- // std::cout << "x_old: " << x_old.transpose() << std::endl;
264- // x_new = x_old + h * update;
265- // grad_new = grad(x_new);
266-
267-
268- // std::cout << "grad_new: " << grad_new.transpose() << std::endl;
269- // std::cout << "x_new: " << x_new.transpose() << std::endl;
270-
271-
272- // if (grad_new.isApprox(zero)) { // already at stationary point
273- // optimum_ = x_old;
274- // value_ = objective(optimum_);
275- // if constexpr (sizeof...(Functor) == 1) { (func(value_), ...); }
276- // return optimum_;
277- // }
278- // // mem update
279- // // update inverse Hessian approximation
280- // int col_idx = n_iter_ % mem_size_;
281- // grad_mem_.col(col_idx) = grad_new - grad_old;
282- // x_mem_.col(col_idx) = x_new - x_old;
283- // gamma = x_mem_.col(col_idx).dot(grad_mem_.col(col_idx)) / grad_mem_.col(col_idx).norm();
284-
285- // std::cout << "q: " << q.transpose() << std::endl;
286- // std::cout << "gamma: " << gamma << std::endl;
287-
288- // // prepare next iteration
289- // if constexpr (sizeof...(Functor) == 1) { (func(objective(x_old)), ...); }
290- // error = grad_new.norm();
291- // // stop |=
292- // // (execute_post_update_step(*this, objective, callbacks_) || execute_stopping_criterion(*this, objective));
293- // x_old = x_new;
294- // grad_old = grad_new;
295- // ++n_iter_;
296- // }
297- // optimum_ = x_old;
298- // value_ = objective(optimum_);
299- // if constexpr (sizeof...(Functor) == 1) { (func(value_), ...); }
300- // return optimum_;
301- // }
302- // // observers
303- // vector_t optimum() const { return optimum_; }
304- // double value() const { return value_; }
305- // int n_iter() const { return n_iter_; }
306- // };
307-
308- // } // namespace fdapde
309-
310- // #endif // __FDAPDE_LBFGS_H__
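
For reference, the block removed above was a commented-out duplicate of the class; the search direction it builds from the memory buffers `x_mem_` and `grad_mem_` follows the standard L-BFGS two-loop recursion over the stored pairs s_i = x_{i+1} - x_i and y_i = grad(x_{i+1}) - grad(x_i). A self-contained textbook sketch (Nocedal & Wright, Alg. 7.4) is given below; the names are illustrative and do not mirror the class members.

```cpp
// Standalone sketch of the L-BFGS two-loop recursion (textbook form).
// Given the current gradient g and the most recent memory pairs (s_i, y_i),
// oldest first, it returns the search direction d = -H_k * g.
#include <Eigen/Dense>
#include <vector>

Eigen::VectorXd lbfgs_direction(const Eigen::VectorXd& g,
                                const std::vector<Eigen::VectorXd>& s,
                                const std::vector<Eigen::VectorXd>& y) {
    int m = static_cast<int>(s.size());
    Eigen::VectorXd q = g;
    std::vector<double> alpha(m), rho(m);
    for (int i = m - 1; i >= 0; --i) {      // first loop: newest to oldest
        rho[i]   = 1.0 / y[i].dot(s[i]);
        alpha[i] = rho[i] * s[i].dot(q);
        q       -= alpha[i] * y[i];
    }
    // initial inverse-Hessian guess H_0 = gamma * I, gamma = s_k^T y_k / y_k^T y_k
    double gamma = m > 0 ? s[m - 1].dot(y[m - 1]) / y[m - 1].squaredNorm() : 1.0;
    Eigen::VectorXd r = gamma * q;
    for (int i = 0; i < m; ++i) {           // second loop: oldest to newest
        double beta = rho[i] * y[i].dot(r);
        r += s[i] * (alpha[i] - beta);
    }
    return -r;                              // descent direction
}
```
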