diff --git a/CMakeLists.txt b/CMakeLists.txt index 246cfcd3d..b218277c6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -154,6 +154,7 @@ if (ENABLE_EXAMPLES) local_dw_csv parametric_tw_csv parametric_dw_csv + heat_conduction_incdmd parametric_dmdc_heat_conduction) set(example_directories prom @@ -180,6 +181,7 @@ if (ENABLE_EXAMPLES) dmd dmd dmd + dmd dmd) list(LENGTH examples len1) diff --git a/examples/dmd/heat_conduction_incdmd.cpp b/examples/dmd/heat_conduction_incdmd.cpp new file mode 100644 index 000000000..34ad00065 --- /dev/null +++ b/examples/dmd/heat_conduction_incdmd.cpp @@ -0,0 +1,690 @@ +/****************************************************************************** + * + * Copyright (c) 2013-2024, Lawrence Livermore National Security, LLC + * and other libROM project developers. See the top-level COPYRIGHT + * file for details. + * + * SPDX-License-Identifier: (Apache-2.0 OR MIT) + * + *****************************************************************************/ + +// libROM MFEM Example: Heat equation solver replaced by +// incremental DMD on the fly (adapted from ex16p.cpp) +// +// Compile with: make heat_conduction_incdmd +// +// ================================================================================= +// +// Sample runs and results for DMD: +// +// Command 1: +// mpirun -np 8 heat_conduction_incdmd --inc +// +// Output 1: +// Residual of DMD solution is: 0.064594653 +// +// Command 2: +// mpirun -np 8 heat_conduction_incdmd --noinc +// +// Output 2: +// Residual of DMD solution is: 0.064594653 + +// +// ================================================================================= +// +// Description: This example constructs a reduced-order model for a nonlinear +// heat equation with incremental DMD. DMD predicts a provisional solution +// every time step, and if the prediction is accurate enough, it replaces the +// full-order model solution as a new snapshot. The full-order model is based +// on ex16p.cpp of MFEM, and this example only works with backward Euler. +// + +#include "mfem.hpp" +#include "algo/DMD.h" +#include "algo/NonuniformDMD.h" +#include "algo/IncrementalDMD.h" +#include "linalg/Vector.h" +#include +#include +#include + +using namespace std; +using namespace mfem; + +/** After spatial discretization, the conduction model can be written as: + * + * du/dt = M^{-1}(-Ku) + * + * where u is the vector representing the temperature, M is the mass matrix, + * and K is the diffusion operator with diffusivity depending on u: + * (\kappa + \alpha u). + * + * Class ConductionOperator represents the right-hand side of the above ODE. + */ +class ConductionOperator : public TimeDependentOperator +{ +protected: + ParFiniteElementSpace &fespace; + Array ess_tdof_list; // this list remains empty for pure Neumann b.c. + + ParBilinearForm *M; + ParBilinearForm *K; + + HypreParMatrix Mmat; + HypreParMatrix Kmat; + HypreParMatrix *T; // T = M + dt K + double current_dt; + + CGSolver M_solver; // Krylov solver for inverting the mass matrix M + HypreSmoother M_prec; // Preconditioner for the mass matrix M + + CGSolver T_solver; // Implicit solver for T = M + dt K + HypreSmoother T_prec; // Preconditioner for the implicit solver + + double alpha, kappa; + + mutable Vector z; // auxiliary vector + +public: + ConductionOperator(ParFiniteElementSpace &f, double alpha, double kappa, + const Vector &u, double tol_cg); + + virtual void Mult(const Vector &u, Vector &du_dt) const; + /** Solve the Backward-Euler equation: k = f(u + dt*k, t), for the unknown k. 
+ This is the only requirement for high-order SDIRK implicit integration.*/ + virtual void ImplicitSolve(const double dt, const Vector &u, Vector &k); + + /// Update the diffusion BilinearForm K using the given true-dof vector `u`. + void SetParameters(const Vector &u); + + Vector* Residual(const double dt, const Vector u0, const Vector u1); + + virtual ~ConductionOperator(); +}; + +double InitialTemperature(const Vector &x); + +int main(int argc, char *argv[]) +{ + // 1. Initialize MPI. + int num_procs, myid; + MPI_Init(&argc, &argv); + MPI_Comm_size(MPI_COMM_WORLD, &num_procs); + MPI_Comm_rank(MPI_COMM_WORLD, &myid); + + // 2. Parse command-line options. + const char *mesh_file = "../data/star.mesh"; + int ser_ref_levels = 2; + int par_ref_levels = 1; + int order = 2; + int ode_solver_type = 1; + double t_final = 1.0; + double dt = 1.0e-2; + double alpha = 1.0e-2; + double kappa = 0.5; + double ef = 0.9999; + int rdim = 1; + bool visualization = true; + bool visit = false; + int vis_steps = 5; + bool use_nonuniform = false; + bool adios2 = false; + + double tol_cg = 1e-12; // tolerance for CG solver + double tol_dmd = 1e-2; // tolerance for DMD accuracy + double tol_dep = 1e-6; // tolerance for linear dependence + + bool use_incremental = true; // use incremental SVD + + int precision = 8; + cout.precision(precision); + + OptionsParser args(argc, argv); + args.AddOption(&mesh_file, "-m", "--mesh", + "Mesh file to use."); + args.AddOption(&ser_ref_levels, "-rs", "--refine-serial", + "Number of times to refine the mesh uniformly in serial."); + args.AddOption(&par_ref_levels, "-rp", "--refine-parallel", + "Number of times to refine the mesh uniformly in parallel."); + args.AddOption(&order, "-o", "--order", + "Order (degree) of the finite elements."); + args.AddOption(&t_final, "-tf", "--t-final", + "Final time; start time is 0."); + args.AddOption(&dt, "-dt", "--time-step", + "Time step."); + args.AddOption(&alpha, "-a", "--alpha", + "Alpha coefficient."); + args.AddOption(&kappa, "-k", "--kappa", + "Kappa coefficient offset."); + args.AddOption(&visualization, "-vis", "--visualization", "-no-vis", + "--no-visualization", + "Enable or disable GLVis visualization."); + args.AddOption(&visit, "-visit", "--visit-datafiles", "-no-visit", + "--no-visit-datafiles", + "Save data files for VisIt (visit.llnl.gov) visualization."); + args.AddOption(&vis_steps, "-vs", "--visualization-steps", + "Visualize every n-th timestep."); + args.AddOption(&adios2, "-adios2", "--adios2-streams", "-no-adios2", + "--no-adios2-streams", + "Save data using adios2 streams."); + args.AddOption(&ef, "-ef", "--energy_fraction", + "Energy fraction for DMD."); + args.AddOption(&rdim, "-rdim", "--rdim", + "Reduced dimension for DMD."); + args.AddOption(&use_nonuniform, "-nonunif", "--nonunif", "-no-nonunif", + "--no-nonunif", + "Use NonuniformDMD."); + + args.AddOption(&tol_cg, "-tolcg", "--tolcg", "CG tolerance."); + args.AddOption(&tol_dmd, "-toldmd", "--toldmd", "DMD accuracy."); + args.AddOption(&tol_dep, "-toldep", "--toldep", + "Linear dependence of snapshots."); + + args.AddOption(&use_incremental, "-inc", "--inc", + "-noinc", "--noinc", + "Incremental SVD."); + + args.Parse(); + if (!args.Good()) + { + args.PrintUsage(cout); + MPI_Finalize(); + return 1; + } + + if (myid == 0) + { + args.PrintOptions(cout); + } + + // 3. Read the serial mesh from the given mesh file on all processors. We can + // handle triangular, quadrilateral, tetrahedral and hexahedral meshes + // with the same code. 
+ Mesh *mesh = new Mesh(mesh_file, 1, 1); + int dim = mesh->Dimension(); + + // 4. Define the ODE solver used for time integration. Several implicit + // singly diagonal implicit Runge-Kutta (SDIRK) methods, as well as + // explicit Runge-Kutta methods are available. + ODESolver *ode_solver; + switch (ode_solver_type) + { + // Implicit L-stable methods + case 1: + ode_solver = new BackwardEulerSolver; + break; + case 2: + ode_solver = new SDIRK23Solver(2); + break; + case 3: + ode_solver = new SDIRK33Solver; + break; + // Explicit methods + case 11: + ode_solver = new ForwardEulerSolver; + break; + case 12: + ode_solver = new RK2Solver(0.5); + break; // midpoint method + case 13: + ode_solver = new RK3SSPSolver; + break; + case 14: + ode_solver = new RK4Solver; + break; + case 15: + ode_solver = new GeneralizedAlphaSolver(0.5); + break; + // Implicit A-stable methods (not L-stable) + case 22: + ode_solver = new ImplicitMidpointSolver; + break; + case 23: + ode_solver = new SDIRK23Solver; + break; + case 24: + ode_solver = new SDIRK34Solver; + break; + default: + cout << "Unknown ODE solver type: " << ode_solver_type << '\n'; + delete mesh; + return 3; + } + + // 5. Refine the mesh in serial to increase the resolution. In this example + // we do 'ser_ref_levels' of uniform refinement, where 'ser_ref_levels' is + // a command-line parameter. + for (int lev = 0; lev < ser_ref_levels; lev++) + { + mesh->UniformRefinement(); + } + + // 6. Define a parallel mesh by a partitioning of the serial mesh. Refine + // this mesh further in parallel to increase the resolution. Once the + // parallel mesh is defined, the serial mesh can be deleted. + ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh); + delete mesh; + for (int lev = 0; lev < par_ref_levels; lev++) + { + pmesh->UniformRefinement(); + } + + // 7. Define the vector finite element space representing the current and the + // initial temperature, u_ref. + H1_FECollection fe_coll(order, dim); + ParFiniteElementSpace fespace(pmesh, &fe_coll); + + int fe_size = fespace.GlobalTrueVSize(); + if (myid == 0) + { + cout << "Number of temperature unknowns: " << fe_size << endl; + } + + ParGridFunction u_gf(&fespace); + + // 8. Set the initial conditions for u. All boundaries are considered + // natural. + FunctionCoefficient u_0(InitialTemperature); + u_gf.ProjectCoefficient(u_0); + Vector u; + u_gf.GetTrueDofs(u); + + // 9. Initialize the conduction operator and the VisIt visualization. + ConductionOperator oper(fespace, alpha, kappa, u, tol_cg); + + VisItDataCollection visit_dc("Heat_Conduction", pmesh); + visit_dc.RegisterField("temperature", &u_gf); + if (visit) + { + visit_dc.SetCycle(0); + visit_dc.SetTime(0.0); + visit_dc.Save(); + } + + // Optionally output a BP (binary pack) file using ADIOS2. This can be + // visualized with the ParaView VTX reader. 
+#ifdef MFEM_USE_ADIOS2 + ADIOS2DataCollection* adios2_dc = NULL; + if (adios2) + { + std::string postfix(mesh_file); + postfix.erase(0, std::string("../data/").size() ); + postfix += "_o" + std::to_string(order); + postfix += "_solver" + std::to_string(ode_solver_type); + const std::string collection_name = "heat_conduction-p-" + postfix + ".bp"; + + adios2_dc = new ADIOS2DataCollection(MPI_COMM_WORLD, collection_name, pmesh); + adios2_dc->SetParameter("SubStreams", std::to_string(num_procs/2) ); + adios2_dc->RegisterField("temperature", &u_gf); + adios2_dc->SetCycle(0); + adios2_dc->SetTime(0.0); + adios2_dc->Save(); + } +#endif + + socketstream sout; + if (visualization) + { + char vishost[] = "localhost"; + int visport = 19916; + sout.open(vishost, visport); + sout << "parallel " << num_procs << " " << myid << endl; + int good = sout.good(), all_good; + MPI_Allreduce(&good, &all_good, 1, MPI_INT, MPI_MIN, pmesh->GetComm()); + if (!all_good) + { + sout.close(); + visualization = false; + if (myid == 0) + { + cout << "Unable to connect to GLVis server at " + << vishost << ':' << visport << endl; + cout << "GLVis visualization disabled.\n"; + } + } + else + { + sout.precision(precision); + sout << "solution\n" << *pmesh << u_gf; + sout << flush; + } + } + + // 10. Perform time-integration (looping over the time iterations, ti, with a + // time-step dt). + ode_solver->Init(oper); + double t = 0.0; + vector ts; + + // 11. Create DMD object and take initial sample. + u_gf.SetFromTrueDofs(u); + + CAROM::DMD* dmd_u; + if (use_incremental) { + int Nt = static_cast(t_final/dt) + 1; + CAROM::Options svd_options(u.Size(), Nt, true, false); + svd_options.setIncrementalSVD(1e-12, dt, 1e-6, 10.0, false, true, false); + svd_options.setMaxBasisDimension(100); + svd_options.setStateIO(false, false); + svd_options.setDebugMode(false); + std::string svd_base_file_name = ""; + dmd_u = new CAROM::IncrementalDMD(u.Size(), dt, + svd_options, + svd_base_file_name, + false); + } + else { + dmd_u = new CAROM::DMD(u.Size(), dt); + } + + // Push the initial snapshot. + dmd_u->takeSample(u.GetData(), t); + ts.push_back(t); + + // 13. Time intergration with on-the-fly DMD construction. + if (myid == 0 && rdim != -1 && ef != -1) + { + std::cout << "Both rdim and ef are set. ef will be ignored." 
<< std::endl; + } + + bool dmd_trained = false; + bool last_step = false; + double res, err_proj_norm; + Vector u_dmd(u.Size()); // DMD prediction + Vector* res_vec = NULL; + + for (int ti = 1; !last_step; ti++) + { + if (myid == 0) std::cout << "\nIteration " << ti << std::endl; + + if (t + dt >= t_final - dt/2) + { + last_step = true; + } + + // ROM solve + if (dmd_trained) + { + if (myid == 0) { + std::cout << "DMD exists: make prediction" << std::endl; + } + + CAROM::Vector* u_pre = new CAROM::Vector(u.GetData(), u.Size(), + true, true); // previous solution + + if (use_incremental) { + // Incremental DMD + u_dmd = dmd_u->predict_dt(u_pre)->getData(); + } + else { + // Vanilla DMD + dmd_u->projectInitialCondition(u_pre); // offset to u_pre + CAROM::Vector* u_dmd_pred = dmd_u->predict(dt); + u_dmd = u_dmd_pred->getData(); // copy data to MFEM::Vector + delete u_dmd_pred; + } + + res_vec = oper.Residual(dt, u, u_dmd); + res = res_vec->Norml2()*res_vec->Norml2(); // Residual (L2): not distributed + delete res_vec; + MPI_Allreduce(MPI_IN_PLACE, &res, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); + + if (myid == 0) { + std::cout << "Residual of DMD solution is: " << res << std::endl; + } + + if (res < tol_dmd) { + // DMD prediction is accurate + if (myid == 0) { + std::cout << "DMD prediction accurate: use it" << std::endl; + } + u = u_dmd; // use DMD solution as new snapshot + t += dt; + } + else { + // FOM solve + if (myid == 0) { + std::cout << "DMD prediction not accurate: call FOM" << std::endl; + } + ode_solver->Step(u, t, dt); + } + + if (!use_incremental) { + // If using vanilla DMD, + // check the linear dependence of the new snapshot + CAROM::Vector* u_new = new CAROM::Vector(u.GetData(), u.Size(), + true, false); + dmd_u->projectInitialCondition(u_pre); + + CAROM::Vector* u_dmd_pred_0 = dmd_u->predict(0.0); + Vector u_new_proj(u_dmd_pred_0->getData(), u.Size()); // projection + Vector err_proj(u_pre->getData(), u.Size()); + err_proj -= u_new_proj; // error = u - proj(u) + err_proj_norm = err_proj.Norml2();// / u.Norml2(); // relative norm + std::cout << "Projection error: " << err_proj_norm << std::endl; + delete u_dmd_pred_0; + + // Increment no. of modes if solution is inaccurate and + // the new snapshot is linearly independent + if (res > tol_dmd && err_proj_norm > tol_dep) { + rdim += 1; + } + delete u_new; + } + + delete u_pre; + + } + else // No constructed DMD: FOM solve + { + if (myid == 0) { + std::cout << "No DMD found: solve FOM" << std::endl; + } + ode_solver->Step(u, t, dt); + } + + // Append new snapshot + dmd_u->takeSample(u.GetData(), t); + ts.push_back(t); + + // DMD training + if (dmd_u->getNumSamples() > rdim) + { + if (myid == 0) std::cout << "Creating DMD with rdim: " << rdim << std::endl; + dmd_u->train(rdim); + dmd_trained = true; + } + + // Visualize solutions + u_gf.SetFromTrueDofs(u); + + if (last_step || (ti % vis_steps) == 0) + { + if (myid == 0) + { + cout << "step " << ti << ", t = " << t << endl; + } + + u_gf.SetFromTrueDofs(u); + if (visualization) + { + sout << "parallel " << num_procs << " " << myid << "\n"; + sout << "solution\n" << *pmesh << u_gf << flush; + } + + if (visit) + { + visit_dc.SetCycle(ti); + visit_dc.SetTime(t); + visit_dc.Save(); + } + +#ifdef MFEM_USE_ADIOS2 + if (adios2) + { + adios2_dc->SetCycle(ti); + adios2_dc->SetTime(t); + adios2_dc->Save(); + } +#endif + } + oper.SetParameters(u); + } + +#ifdef MFEM_USE_ADIOS2 + if (adios2) + { + delete adios2_dc; + } +#endif + + // 12. Save the final solution in parallel. 
This output can be viewed later + // using GLVis: "glvis -np -m heat_conduction-mesh -g heat_conduction-final". + { + ostringstream sol_name; + sol_name << "heat_conduction-final." << setfill('0') << setw(6) << myid; + ofstream osol(sol_name.str().c_str()); + osol.precision(precision); + u_gf.Save(osol); + } + + // 13. Free the used memory. + delete ode_solver; + delete pmesh; + + MPI_Finalize(); + + return 0; +} + +ConductionOperator::ConductionOperator(ParFiniteElementSpace &f, double al, + double kap, const Vector &u, double tol_cg) + : TimeDependentOperator(f.GetTrueVSize(), 0.0), fespace(f), M(NULL), K(NULL), + T(NULL), current_dt(0.0), + M_solver(f.GetComm()), T_solver(f.GetComm()), z(height) +{ + const double rel_tol = tol_cg; + + M = new ParBilinearForm(&fespace); + M->AddDomainIntegrator(new MassIntegrator()); + M->Assemble(0); // keep sparsity pattern of M and K the same + M->FormSystemMatrix(ess_tdof_list, Mmat); + + M_solver.iterative_mode = false; + M_solver.SetRelTol(rel_tol); + M_solver.SetAbsTol(0.0); + M_solver.SetMaxIter(1000); + M_solver.SetPrintLevel(0); + M_prec.SetType(HypreSmoother::Jacobi); + M_solver.SetPreconditioner(M_prec); + M_solver.SetOperator(Mmat); + + alpha = al; + kappa = kap; + + T_solver.iterative_mode = false; + T_solver.SetRelTol(rel_tol); + T_solver.SetAbsTol(0.0); + T_solver.SetMaxIter(1000); + T_solver.SetPrintLevel(0); + T_solver.SetPreconditioner(T_prec); + + SetParameters(u); +} + +void ConductionOperator::Mult(const Vector &u, Vector &du_dt) const +{ + // Compute: + // du_dt = M^{-1}*-K(u) + // for du_dt + Kmat.Mult(u, z); + z.Neg(); // z = -z + M_solver.Mult(z, du_dt); +} + +void ConductionOperator::ImplicitSolve(const double dt, + const Vector &u, Vector &du_dt) +{ + // Solve the equation: + // du_dt = M^{-1}*[-K(u + dt*du_dt)] + // for du_dt + if (!T) + { + T = Add(1.0, Mmat, dt, Kmat); + current_dt = dt; + T_solver.SetOperator(*T); + } + MFEM_VERIFY(dt == current_dt, ""); // SDIRK methods use the same dt + Kmat.Mult(u, z); + z.Neg(); + T_solver.Mult(z, du_dt); +} + +void ConductionOperator::SetParameters(const Vector &u) +{ + ParGridFunction u_alpha_gf(&fespace); + u_alpha_gf.SetFromTrueDofs(u); + for (int i = 0; i < u_alpha_gf.Size(); i++) + { + u_alpha_gf(i) = kappa + alpha*u_alpha_gf(i); + } + + delete K; + K = new ParBilinearForm(&fespace); + + GridFunctionCoefficient u_coeff(&u_alpha_gf); + + K->AddDomainIntegrator(new DiffusionIntegrator(u_coeff)); + K->Assemble(0); // keep sparsity pattern of M and K the same + K->FormSystemMatrix(ess_tdof_list, Kmat); + delete T; + T = NULL; // re-compute T on the next ImplicitSolve +} + +Vector* ConductionOperator::Residual(const double dt, + const Vector u0, const Vector u1) +{ + // + // Compute res(u0, u1) = [(M+dt*K(u0))*u1 - M*u0] / dt / norm(K*u0) + // + T = Add(1.0, Mmat, dt, Kmat); // T=M+dt*K + Vector* res = new Vector(u1.Size()); + T->Mult(u1, *res); // (M+dt*K)*u^n + Kmat.Mult(u0, z); // z=K*u^{n-1} + + double norm = z.Norml2()*z.Norml2(); // norm(dt*K*u^{n-1}): local + MPI_Allreduce(MPI_IN_PLACE, + &norm, + 1, + MPI_DOUBLE, + MPI_SUM, + MPI_COMM_WORLD); + norm = sqrt(norm) * dt; + Mmat.Mult(u0, z); // z=M*u^{n-1} + delete T; + T = NULL; + + *res -= z; + *res /= norm; + + return res; // Vector is local. 
+} + +ConductionOperator::~ConductionOperator() +{ + delete T; + delete M; + delete K; +} + +double InitialTemperature(const Vector &x) +{ + if (x.Norml2() < 0.5) + { + return 2.0; + } + else + { + return 1.0; + } +} diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt index 0e546061d..d7e384338 100644 --- a/lib/CMakeLists.txt +++ b/lib/CMakeLists.txt @@ -38,6 +38,7 @@ set(module_list algo/DMDc algo/AdaptiveDMD algo/NonuniformDMD + algo/IncrementalDMD algo/DifferentialEvolution algo/greedy/GreedyCustomSampler algo/greedy/GreedyRandomSampler diff --git a/lib/algo/DMD.h b/lib/algo/DMD.h index ba3750251..71b524fdc 100644 --- a/lib/algo/DMD.h +++ b/lib/algo/DMD.h @@ -215,6 +215,15 @@ class DMD */ void summary(std::string base_file_name); + /** + * @brief Make a prediction for just one dt. + * To be overridden by IncrementalDMD.h + */ + virtual Vector* predict_dt(Vector* u) { + projectInitialCondition(u); + return predict(d_dt); + } + protected: /** * @brief Obtain DMD model interpolant at desired parameter point by diff --git a/lib/algo/IncrementalDMD.cpp b/lib/algo/IncrementalDMD.cpp new file mode 100644 index 000000000..9b3836e17 --- /dev/null +++ b/lib/algo/IncrementalDMD.cpp @@ -0,0 +1,292 @@ +/****************************************************************************** + * + * Copyright (c) 2013-2024, Lawrence Livermore National Security, LLC + * and other libROM project developers. See the top-level COPYRIGHT + * file for details. + * + * SPDX-License-Identifier: (Apache-2.0 OR MIT) + * + *****************************************************************************/ + +// Description: Implementation of the incremental DMD algorithm. + +#include "IncrementalDMD.h" +#include "linalg/Matrix.h" +#include "utils/Utilities.h" + +#include "mfem.hpp" + +using namespace mfem; + +namespace CAROM { + +IncrementalDMD::IncrementalDMD(int dim, + double dt, + Options svd_options, + std::string svd_base_file_name, + bool alt_output_basis, + Vector* state_offset) : + DMD(dim, dt, alt_output_basis, state_offset) +{ + bg = new BasisGenerator(svd_options, + true, + svd_base_file_name); + svd = new IncrementalSVDBrand(svd_options, + svd_base_file_name); +} + +IncrementalDMD::~IncrementalDMD() +{ + delete bg; + delete svd; +} + +void +IncrementalDMD::load(std::string base_file_name) +{ + CAROM_ASSERT(!base_file_name.empty()); + + DMD::load(base_file_name); +} + +void +IncrementalDMD::save(std::string base_file_name) +{ + CAROM_ASSERT(!base_file_name.empty()); + CAROM_VERIFY(d_trained); + + DMD::save(base_file_name); +} + +Vector* +IncrementalDMD::predict_dt(Vector* u) +{ + Matrix* Up_new = NULL; + Matrix* U_new = NULL; + if (svd->d_Up_pre->numColumns() == svd->d_Uq->numRows()) { + // Liearly dependent sample + Up_new = svd->d_Up_pre->mult(svd->d_Uq); + U_new = new Matrix(*(svd->d_U_pre)); + } + else { + // Linearly independent sample + int r = svd->d_Up_pre->numColumns(); + Up_new = new Matrix(r+1, r+1, false); + for (int i = 0; i < r; i++) { + for (int j = 0; j < r; j++) { + Up_new->item(i, j) = svd->d_Up_pre->item(i, j); + } + } + Up_new->item(r, r) = 1; + Up_new = Up_new->mult(svd->d_Uq); + U_new = new Matrix(d_dim, r+1, true); + for (int i = 0; i < d_dim; i++) { + for (int j = 0; j < r; j++) { + U_new->item(i, j) = svd->d_U_pre->item(i, j); + } + U_new->item(i, r) = svd->d_p->item(i); + } + } + + Vector* UTu = U_new->transposeMult(u); + Vector* u_proj = Up_new->transposeMult(UTu); + Vector* Atildeu = d_A_tilde->mult(u_proj); + Vector* UpAtildeu = Up_new->mult(Atildeu); + Vector* pred = 
U_new->mult(UpAtildeu); + + delete UTu; + delete Atildeu; + delete UpAtildeu; + delete u_proj; + delete Up_new; + delete U_new; + + return pred; +} + +void +IncrementalDMD::train(int k, const Matrix* W0, double linearity_tol) +{ + const Matrix* f_snapshots = NULL; + //CAROM_VERIFY(f_snapshots->numColumns() > 1); + //CAROM_VERIFY(k > 0 && k <= f_snapshots->numColumns() - 1); + d_energy_fraction = -1.0; + d_k = k; + updateDMD(f_snapshots); + delete f_snapshots; +} + +void +IncrementalDMD::updateDMD(const Matrix* f_snapshots) +{ + /* Incremental SVD + * + * Does everything below all at once: + * (1) Initialize SVD if not initialized + * (2) Check linear dependence of new snapshot + * (3) Rank-1 update + * + */ + int num_snapshots = d_snapshots.size(); + double* u_in = d_snapshots[num_snapshots-2]->getData(); + + svd->takeSample(u_in, false); + + int num_samples = svd->getNumSamples(); + + if (num_samples == 1) + { + Matrix* d_basis = new Matrix(*(svd->getSpatialBasis())); + Matrix* d_basis_right = new Matrix(*(svd->getTemporalBasis())); + Vector* d_sv = new Vector(*(svd->getSingularValues())); + Matrix* d_S_inv = new Matrix(1, 1, false); + d_S_inv->item(0, 0) = 1/d_sv->item(0); + + Matrix* f_snapshots_out = new Matrix(d_snapshots.back()->getData(), + d_dim, 1, true); + Matrix* br_Sinv = d_basis_right->mult(d_S_inv); + Matrix* f_br_Sinv = f_snapshots_out->mult(br_Sinv); + + d_A_tilde = d_basis->transposeMult(f_br_Sinv); + + delete br_Sinv; + delete f_br_Sinv; + delete f_snapshots_out; + + delete d_basis; + delete d_basis_right; + delete d_sv; + delete d_S_inv; + } + else + { + Vector u_vec(u_in, svd->d_dim, true); + Vector e_proj(u_in, svd->d_dim, true); + + Vector* UTe = svd->d_U_pre->transposeMult(e_proj); + Vector* UUTe = svd->d_U_pre->mult(UTe); + e_proj -= *UUTe; + + delete UTe; + delete UUTe; + + UTe = svd->d_U_pre->transposeMult(e_proj); + UUTe = svd->d_U_pre->mult(UTe); + e_proj -= *UUTe; + + delete UTe; + delete UUTe; + + double k = e_proj.inner_product(e_proj); + if (k <= 0) k = 0; + else k = sqrt(k); + + if ( k > svd->d_linearity_tol ) { + if (d_rank == 0) { + std::cout << "Added linearly independent sample" << std::endl; + } + Matrix* d_A_tilde_tmp = new Matrix(num_samples, num_samples, false); + Vector* u_new = new Vector(d_snapshots[num_snapshots-1]->getData(), + d_dim, true); + + Vector* UTu = svd->d_U_pre->transposeMult(u_new); + Vector* fTp = new Vector(num_snapshots-2, false); + for (int i = 0; i < num_snapshots-2; i++) { + fTp->item(i) = d_snapshots[i+1]->inner_product(svd->d_p); + } + + Vector* UpTUTu = svd->d_Up_pre->transposeMult(UTu); + Vector* WTfTp = new Vector(num_samples-1, false); + for (int i = 0; i < num_samples-1; i++) { + double d = 0.0; + for (int j = 0; j < num_snapshots-2; j++) { + d += svd->d_W->item(j,i)*fTp->item(j); + } + WTfTp->item(i) = d; + } + Vector* WpTWTfTp = svd->d_Wp_pre->transposeMult(WTfTp); + for (int i = 0; i < num_samples-1; i++) { + for (int j = 0; j < num_samples-1; j++) { + d_A_tilde_tmp->item(i, j) = d_A_tilde->item(i, j) * svd->d_S_pre->item(j); + } + d_A_tilde_tmp->item(i, num_samples-1) = UpTUTu->item(i); + } + for (int j = 0; j < num_samples-1; j++) { + d_A_tilde_tmp->item(num_samples-1, j) = WpTWTfTp->item(j); + } + + d_A_tilde_tmp->item(num_samples-1, + num_samples-1) = svd->d_p->inner_product(u_new); + + Matrix* WSinv = svd->d_Wq->mult(svd->d_Sq_inv); + Matrix* AWSinv = d_A_tilde_tmp->mult(WSinv); + + Matrix* d_A_tilde_new = svd->d_Uq->transposeMult(AWSinv); + + delete UTu; + delete fTp; + delete WTfTp; + delete WpTWTfTp; + delete 
UpTUTu; + delete WSinv; + delete AWSinv; + + delete d_A_tilde; + d_A_tilde = d_A_tilde_new; + + delete u_new; + delete d_A_tilde_tmp; + } + //} + else + { + if (d_rank == 0) { + std::cout << "Added linearly dependent sample" << std::endl; + } + + Matrix* d_A_tilde_tmp = new Matrix(num_samples, num_samples+1, false); + Vector* u_new = new Vector(d_snapshots[num_snapshots-1]->getData(), + d_dim, true); + + Vector* UTu = new Vector(num_samples, false); + + svd->d_U_pre->transposeMult(*u_new, UTu); + Vector* UpTUTu = svd->d_Up_pre->transposeMult(UTu); + + for (int i = 0; i < num_samples; i++) { + for (int j = 0; j < num_samples; j++) { + d_A_tilde_tmp->item(i, j) = d_A_tilde->item(i, j) * svd->d_S_pre->item(j); + } + d_A_tilde_tmp->item(i, num_samples) = UpTUTu->item(i); + } + + Matrix* WSinv = svd->d_Wq->mult(svd->d_Sq_inv); + Matrix* AWSinv = d_A_tilde_tmp->mult(WSinv); + + Matrix* d_A_tilde_new = svd->d_Uq->transposeMult(AWSinv); + + delete UTu; + delete UpTUTu; + delete WSinv; + delete AWSinv; + + delete d_A_tilde; + d_A_tilde = d_A_tilde_new; + + delete u_new; + delete d_A_tilde_tmp; + } + } + + if (d_rank == 0) { + std::cout << "Using " << num_samples << " basis vectors out of " + << num_snapshots << " snapshots" << std::endl; + } + + d_trained = true; + + return; + +} + +} diff --git a/lib/algo/IncrementalDMD.h b/lib/algo/IncrementalDMD.h new file mode 100644 index 000000000..a97e7b35e --- /dev/null +++ b/lib/algo/IncrementalDMD.h @@ -0,0 +1,100 @@ +/****************************************************************************** + * + * Copyright (c) 2013-2024, Lawrence Livermore National Security, LLC + * and other libROM project developers. See the top-level COPYRIGHT + * file for details. + * + * SPDX-License-Identifier: (Apache-2.0 OR MIT) + * + *****************************************************************************/ + +// Description: Computes the DMD algorithm on the given snapshot matrix +// with uniform sampling time steps, using incremenatal SVD +// to update the DMD matrix. + +#ifndef included_IncrementalDMD_h +#define included_IncrementalDMD_h + +#include "DMD.h" +#include "linalg/BasisGenerator.h" +#include "linalg/svd/IncrementalSVDBrand.h" + +namespace CAROM { + +/** + * Class IncrementalDMD implements the standard DMD algorithm on the + * given snapshot matrix with uniform sampling time steps, using + * incremental SVD for update. + */ +class IncrementalDMD : public DMD +{ +public: + + /** + * @brief Constructor. Basic DMD with uniform time step size. + * + * @param[in] dim The full-order state dimension. + * @param[in] dt The dt between samples. + * @param[in] svd_options Options for SVD. + * @param[in] alt_output_basis Whether to use the alternative basis for + * output, i.e. phi = U^(+)*V*Omega^(-1)*X. + * @param[in] state_offset The state offset. + */ + IncrementalDMD(int dim, + double dt, + Options svd_options, + std::string svd_base_file_name, + bool alt_output_basis = false, + Vector* state_offset = NULL); + + /** + * @brief Destroy the IncrementalDMD object + */ + ~IncrementalDMD(); + + /** + * @brief Load the object state to a file. + * + * @param[in] base_file_name The base part of the filename to load the + * database to. + */ + void load(std::string base_file_name) override; + + /** + * @brief Save the object state to a file. + * + * @param[in] base_file_name The base part of the filename to save the + * database to. + */ + void save(std::string base_file_name) override; + + /** + * @param[in] k The number of modes to keep after doing SVD. 
+ * @param[in] W0 The initial basis to prepend to W. + * @param[in] linearity_tol The tolerance for determining whether a column + of W is linearly independent with W0. + */ + void train(int k, + const Matrix* W0 = NULL, + double linearity_tol = 0.0) override; + + /** + * @brief Make a prediction for just one dt. + */ + Vector* predict_dt(Vector* u) override; + + +protected: + + void updateDMD(const Matrix* f_snapshots); + + BasisGenerator* bg = NULL; + IncrementalSVDBrand* svd = NULL; + +private: + +}; + +} + +#endif diff --git a/lib/linalg/BasisGenerator.h b/lib/linalg/BasisGenerator.h index 87604447e..8a604177a 100644 --- a/lib/linalg/BasisGenerator.h +++ b/lib/linalg/BasisGenerator.h @@ -262,6 +262,8 @@ class BasisGenerator const std::string & cutoffOutputPath = "", const int first_sv = 0); + friend class IncrementalDMD; + protected: /** * @brief Writer of basis vectors. diff --git a/lib/linalg/svd/IncrementalSVDBrand.cpp b/lib/linalg/svd/IncrementalSVDBrand.cpp index b4648ab58..0a987b1fc 100644 --- a/lib/linalg/svd/IncrementalSVDBrand.cpp +++ b/lib/linalg/svd/IncrementalSVDBrand.cpp @@ -19,6 +19,9 @@ #include #include +# include "mfem.hpp" +using namespace mfem; + namespace CAROM { IncrementalSVDBrand::IncrementalSVDBrand( @@ -27,7 +30,7 @@ IncrementalSVDBrand::IncrementalSVDBrand( IncrementalSVD( options, basis_file_name), - d_Up(0), + d_Up(0),d_Wp(0),d_Wp_inv(0), d_singular_value_tol(options.singular_value_tol) { CAROM_VERIFY(options.singular_value_tol >= 0); @@ -55,6 +58,9 @@ IncrementalSVDBrand::IncrementalSVDBrand( // Compute the basis. computeBasis(); } + + d_max_num_samples = options.max_num_samples; + d_max_num_basis = options.max_basis_dimension; } IncrementalSVDBrand::~IncrementalSVDBrand() @@ -84,6 +90,13 @@ IncrementalSVDBrand::~IncrementalSVDBrand() if (d_Up) { delete d_Up; } + if (d_Wp) { + delete d_Wp; + } + if (d_Wp_inv) { + delete d_Wp_inv; + } + } const Matrix* @@ -105,8 +118,7 @@ IncrementalSVDBrand::getTemporalBasis() } void -IncrementalSVDBrand::buildInitialSVD( - double* u) +IncrementalSVDBrand::buildInitialSVD(double* u) { CAROM_VERIFY(u != 0); @@ -115,41 +127,82 @@ IncrementalSVDBrand::buildInitialSVD( Vector u_vec(u, d_dim, true); double norm_u = u_vec.norm(); d_S->item(0) = norm_u; + d_S_pre = NULL; // Build d_Up for this new time interval. d_Up = new Matrix(1, 1, false); d_Up->item(0, 0) = 1.0; + d_Up_pre = new Matrix(1, 1, false); + d_Up_pre->item(0, 0) = 1.0; // Build d_U for this new time interval. d_U = new Matrix(d_dim, 1, true); for (int i = 0; i < d_dim; ++i) { d_U->item(i, 0) = u[i]/norm_u; } + d_U_pre = new Matrix(d_dim, 1, true); + for (int i = 0; i < d_dim; ++i) { + d_U_pre->item(i, 0) = u[i]/norm_u; + } // Build d_W for this new time interval. if (d_update_right_SV) { - d_W = new Matrix(1, 1, false); + // d_W is preallocated. + d_W = new Matrix(d_max_num_samples, d_max_num_basis, false); d_W->item(0, 0) = 1.0; + d_W_pre = new Matrix(d_max_num_samples, d_max_num_basis, false); + d_W_pre->item(0, 0) = 1.0; + d_Wp = new Matrix(1, 1, false); + d_Wp->item(0, 0) = 1.0; + d_Wp_pre = new Matrix(1, 1, false); + d_Wp_pre->item(0, 0) = 1.0; + d_Wp_inv = new Matrix(1, 1, false); + d_Wp_inv->item(0, 0) = 1.0; + } // We now have the first sample for the new time interval. 
d_num_samples = 1; d_num_rows_of_W = 1; + d_Uq = new Matrix(1, 1, false); + d_Uq->item(0, 0) = 1.0; + + d_Sq_inv = new Matrix(1, 1, false); + d_Sq_inv->item(0, 0) = 1.0; + + d_Wq = new Matrix(1, 1, false); + d_Wq->item(0, 0) = 1.0; + + d_p = new Vector(d_dim, true); + } bool IncrementalSVDBrand::buildIncrementalSVD( double* u, bool add_without_increase) { + CAROM_VERIFY(u != 0); // Compute the projection error // (accurate down to the machine precision) Vector u_vec(u, d_dim, true); Vector e_proj(u, d_dim, true); - e_proj -= *(d_U->mult(d_U->transposeMult(e_proj))); // Gram-Schmidt - e_proj -= *(d_U->mult(d_U->transposeMult(e_proj))); // Re-orthogonalization + + Vector* UTe = d_U->transposeMult(e_proj); + Vector* UUTe = d_U->mult(UTe); + e_proj -= *UUTe; // Gram-Schmidt + + delete UTe; + delete UUTe; + + UTe = d_U->transposeMult(e_proj); + UUTe = d_U->mult(UTe); + e_proj -= *UUTe; // Re-orthogonalization + + delete UTe; + delete UUTe; double k = e_proj.inner_product(e_proj); if (k <= 0) { @@ -158,6 +211,7 @@ IncrementalSVDBrand::buildIncrementalSVD( } else { k = sqrt(k); + if(d_rank == 0) std::cout << "k=" << k << std::endl; } // Use k to see if the vector addressed by u is linearly dependent @@ -189,12 +243,11 @@ IncrementalSVDBrand::buildIncrementalSVD( // Create Q. double* Q; - Vector* U_mult_u = new Vector(d_U->transposeMult(u_vec)->getData(), - d_num_samples, - false); - Vector* l = d_Up->transposeMult(U_mult_u); + Vector* UTu = d_U->transposeMult(u_vec); + Vector* l = d_Up->transposeMult(UTu); constructQ(Q, l, k); - delete U_mult_u, l; + delete l; + delete UTu; // Now get the singular value decomposition of Q. Matrix* A; @@ -215,7 +268,30 @@ IncrementalSVDBrand::buildIncrementalSVD( // This sample is linearly dependent and we are not skipping linearly // dependent samples. if(d_rank == 0) std::cout << "adding linearly dependent sample!\n"; + + // Update IncrementalDMDInternal members + delete d_Uq; + delete d_Wq; + delete d_Sq_inv; + delete d_p; + d_Uq = new Matrix(d_num_samples, d_num_samples, false); + d_Wq = new Matrix(d_num_samples+1, d_num_samples, false); + d_Sq_inv = new Matrix(d_num_samples, d_num_samples, false); + d_p = new Vector(d_dim, true); + + for (int i = 0; i < d_num_samples; i++) { + for (int j = 0; j < d_num_samples; j++) { + d_Uq->item(i, j) = A->item(i, j); + d_Wq->item(i, j) = W->item(i, j); + } + d_Sq_inv->item(i, i) = 1 / sigma->item(i, i); + } + for (int j = 0; j < d_num_samples; j++) { + d_Wq->item(d_num_samples, j) = W->item(d_num_samples, j); + } + addLinearlyDependentSample(A, W, sigma); + delete sigma; } else if (!linearly_dependent_sample) { @@ -229,8 +305,35 @@ IncrementalSVDBrand::buildIncrementalSVD( // addNewSample will assign sigma to d_S hence it should not be // deleted upon return. 
+ + // Update IncrementalDMDInternal members + delete d_Uq; + delete d_Wq; + delete d_Sq_inv; + delete d_p; + d_Uq = new Matrix(A->getData(), + A->numRows(), + A->numColumns(), + A->distributed(), + true); + d_Wq = new Matrix(W->getData(), + W->numRows(), + W->numColumns(), + W->distributed(), + true); + d_Sq_inv = new Matrix(d_num_samples+1, d_num_samples+1, false); + for (int i = 0; i < d_num_samples+1; i++) { + d_Sq_inv->item(i, i) = 1 / sigma->item(i, i); + } + d_p = new Vector(d_dim, true); + for (int i = 0; i < d_dim; i++) { + d_p->item(i) = e_proj.item(i) / k; + } + addNewSample(j, A, W, sigma); + delete j; + delete sigma; } delete A; delete W; @@ -240,6 +343,7 @@ IncrementalSVDBrand::buildIncrementalSVD( delete W; delete sigma; } + return result; } @@ -273,6 +377,7 @@ IncrementalSVDBrand::updateSpatialBasis() // (not likely to be called anymore but left for safety) if (fabs(checkOrthogonality(d_basis)) > std::numeric_limits::epsilon()*static_cast(d_num_samples)) { + std::cout << "Re-orthogonalizing spatial basis" << std::endl; d_basis->orthogonalize(); } @@ -282,7 +387,14 @@ void IncrementalSVDBrand::updateTemporalBasis() { delete d_basis_right; - d_basis_right = new Matrix(*d_W); + Matrix* W = new Matrix(d_num_rows_of_W, d_num_samples, false); + for (int i = 0; i < d_num_rows_of_W; i++) { + for (int j = 0; j < d_num_samples; j++) { + W->item(i,j) = d_W->item(i,j); + } + } + d_basis_right = W->mult(d_Wp); + delete W; // remove the smallest singular value if it is smaller than d_singular_value_tol if ( (d_singular_value_tol != 0.0) && @@ -354,6 +466,9 @@ IncrementalSVDBrand::addLinearlyDependentSample( // Chop a row and a column off of A to form Amod. Also form // d_S by chopping a row and a column off of sigma. Matrix Amod(d_num_samples, d_num_samples, false); + delete d_S_pre; + d_S_pre = d_S; + d_S = new Vector(d_num_samples, false); for (int row = 0; row < d_num_samples; ++row) { for (int col = 0; col < d_num_samples; ++col) { Amod.item(row, col) = A->item(row, col); @@ -364,33 +479,72 @@ IncrementalSVDBrand::addLinearlyDependentSample( } } + // d_U stays unmodified but update d_Up + delete d_U_pre; + d_U_pre = new Matrix(d_U->getData(), + d_U->numRows(), + d_U->numColumns(), + d_U->distributed()); // Multiply d_Up and Amod and put result into d_Up. Matrix* Up_times_Amod = d_Up->mult(Amod); - delete d_Up; + delete d_Up_pre; + d_Up_pre = d_Up; d_Up = Up_times_Amod; - Matrix* new_d_W; + Matrix* new_d_Wp; + Matrix* new_d_Wp_inv; + Matrix* Winv; if (d_update_right_SV) { - // The new d_W is the product of the current d_W extended by another row - // and column and W. The only new value in the extended version of d_W - // that is non-zero is the new lower right value and it is 1. We will - // construct this product without explicitly forming the extended version of - // d_W. 
- new_d_W = new Matrix(d_num_rows_of_W+1, d_num_samples, false); - for (int row = 0; row < d_num_rows_of_W; ++row) { + new_d_Wp = new Matrix(d_num_samples, d_num_samples, false); + new_d_Wp_inv = new Matrix(d_num_samples, d_num_samples, false); + Winv = new Matrix(d_num_samples, d_num_samples, false); + for (int row = 0; row < d_num_samples; ++row) { for (int col = 0; col < d_num_samples; ++col) { - double new_d_W_entry = 0.0; + double new_d_Wp_entry = 0.0; for (int entry = 0; entry < d_num_samples; ++entry) { - new_d_W_entry += d_W->item(row, entry)*W->item(entry, col); + new_d_Wp_entry += d_Wp->item(row, entry)*W->item(entry, col); } - new_d_W->item(row, col) = new_d_W_entry; + new_d_Wp->item(row, col) = new_d_Wp_entry; } } + delete d_Wp_pre; + d_Wp_pre = d_Wp; + d_Wp = new_d_Wp; + + double norm = 0; for (int col = 0; col < d_num_samples; ++col) { - new_d_W->item(d_num_rows_of_W, col) = W->item(d_num_samples, col); + norm += W->item(d_num_samples, col)*W->item(d_num_samples, col); + } + norm = 1-norm; + for (int row = 0; row < d_num_samples; ++row) { + for (int col = 0; col < d_num_samples; ++col) { + double wW_entry = 0.0; + for (int entry = 0; entry < d_num_samples; ++entry) { + wW_entry += W->item(d_num_samples, entry)*W->item(col, entry); + } + Winv->item(row, col) = W->item(col, row) + W->item(d_num_samples, + row)*wW_entry/norm; + } + } + for (int row = 0; row < d_num_samples; ++row) { + for (int col = 0; col < d_num_samples; ++col) { + double new_d_Wp_inv_entry = 0.0; + for (int entry = 0; entry < d_num_samples; ++entry) { + new_d_Wp_inv_entry += Winv->item(row, entry)*d_Wp_inv->item(entry, col); + } + new_d_Wp_inv->item(row, col) = new_d_Wp_inv_entry; + } + } + delete d_Wp_inv; + d_Wp_inv = new_d_Wp_inv; + + for (int col = 0; col < d_num_samples; ++col) { + double new_entry = 0.0; + for (int entry = 0; entry < d_num_samples; ++entry) { + new_entry += W->item(d_num_samples, entry)*d_Wp_inv->item(entry, col); + } + d_W->item(d_num_rows_of_W, col) = new_entry; } - delete d_W; - d_W = new_d_W; ++d_num_rows_of_W; } @@ -415,31 +569,50 @@ IncrementalSVDBrand::addNewSample( } newU->item(row, d_num_samples) = j->item(row); } - delete d_U; + delete d_U_pre; + d_U_pre = d_U; d_U = newU; - Matrix* new_d_W; + //Matrix* new_d_W; + Matrix* new_d_Wp; + Matrix* new_d_Wp_inv; if (d_update_right_SV) { - // The new d_W is the product of the current d_W extended by another row - // and column and W. The only new value in the extended version of d_W - // that is non-zero is the new lower right value and it is 1. We will - // construct this product without explicitly forming the extended version of - // d_W. 
- new_d_W = new Matrix(d_num_rows_of_W+1, d_num_samples+1, false); - for (int row = 0; row < d_num_rows_of_W; ++row) { + new_d_Wp = new Matrix(d_num_samples+1, d_num_samples+1, false); + new_d_Wp_inv = new Matrix(d_num_samples+1, d_num_samples+1, false); + + d_W->item(d_num_rows_of_W, d_num_samples) = 1.0; + + for (int row = 0; row < d_num_samples; ++row) { for (int col = 0; col < d_num_samples+1; ++col) { - double new_d_W_entry = 0.0; + double new_d_Wp_entry = 0.0; for (int entry = 0; entry < d_num_samples; ++entry) { - new_d_W_entry += d_W->item(row, entry)*W->item(entry, col); + new_d_Wp_entry += d_Wp->item(row, entry)*W->item(entry, col); } - new_d_W->item(row, col) = new_d_W_entry; + new_d_Wp->item(row, col) = new_d_Wp_entry; } } for (int col = 0; col < d_num_samples+1; ++col) { - new_d_W->item(d_num_rows_of_W, col) = W->item(d_num_samples, col); + new_d_Wp->item(d_num_samples, col) = W->item(d_num_samples, col); } - delete d_W; - d_W = new_d_W; + delete d_Wp_pre; + d_Wp_pre = d_Wp; + d_Wp = new_d_Wp; + + for (int row = 0; row < d_num_samples+1; ++row) { + for (int col = 0; col < d_num_samples+1; ++col) { + double new_d_Wp_inv_entry = 0.0; + for (int entry = 0; entry < d_num_samples; ++entry) { + new_d_Wp_inv_entry += W->item(entry, row)*d_Wp_inv->item(entry, col); + } + new_d_Wp_inv->item(row, col) = new_d_Wp_inv_entry; + } + } + for (int row = 0; row < d_num_samples+1; ++row) { + new_d_Wp_inv->item(row, d_num_samples) = W->item(d_num_samples, row); + } + delete d_Wp_inv; + d_Wp_inv = new_d_Wp_inv; + } // The new d_Up is the product of the current d_Up extended by another row @@ -460,11 +633,12 @@ IncrementalSVDBrand::addNewSample( for (int col = 0; col < d_num_samples+1; ++col) { new_d_Up->item(d_num_samples, col) = A->item(d_num_samples, col); } - delete d_Up; + delete d_Up_pre; + d_Up_pre = d_Up; d_Up = new_d_Up; - // d_S = sigma. - delete d_S; + delete d_S_pre; + d_S_pre = d_S; int num_dim = std::min(sigma->numRows(), sigma->numColumns()); d_S = new Vector(num_dim, false); for (int i = 0; i < num_dim; i++) { @@ -475,31 +649,6 @@ IncrementalSVDBrand::addNewSample( ++d_num_samples; ++d_num_rows_of_W; - // Reorthogonalize if necessary. - long int max_U_dim; - if (d_num_samples > d_total_dim) { - max_U_dim = d_num_samples; - } - else { - max_U_dim = d_total_dim; - } - if (fabs(checkOrthogonality(d_Up)) > - std::numeric_limits::epsilon()*static_cast(max_U_dim)) { - d_Up->orthogonalize(); - } - if (fabs(checkOrthogonality(d_U)) > - std::numeric_limits::epsilon()*static_cast(max_U_dim)) { - d_U->orthogonalize(); // Will not be called, but just in case - } - - if(d_update_right_SV) - { - if (fabs(checkOrthogonality(d_W)) > - std::numeric_limits::epsilon()*d_num_samples) { - d_W->orthogonalize(); - } - } - } } diff --git a/lib/linalg/svd/IncrementalSVDBrand.h b/lib/linalg/svd/IncrementalSVDBrand.h index 6b7e54482..3f5e3f9b5 100644 --- a/lib/linalg/svd/IncrementalSVDBrand.h +++ b/lib/linalg/svd/IncrementalSVDBrand.h @@ -27,6 +27,20 @@ namespace CAROM { class IncrementalSVDBrand : public IncrementalSVD { public: + /** + * @brief Constructor. + * + * @param[in] options The struct containing the options for this SVD + * implementation. + * @param[in] basis_file_name The base part of the name of the file + * containing the basis vectors. Each process + * will append its process ID to this base + * name. + * @see Options + */ + IncrementalSVDBrand( + Options options, + const std::string& basis_file_name); /** * @brief Destructor. 
*/ @@ -50,23 +64,9 @@ class IncrementalSVDBrand : public IncrementalSVD const Matrix* getTemporalBasis() override; -private: - friend class BasisGenerator; + friend class IncrementalDMD; - /** - * @brief Constructor. - * - * @param[in] options The struct containing the options for this SVD - * implementation. - * @param[in] basis_file_name The base part of the name of the file - * containing the basis vectors. Each process - * will append its process ID to this base - * name. - * @see Options - */ - IncrementalSVDBrand( - Options options, - const std::string& basis_file_name); +private: /** * @brief Unimplemented default constructor. @@ -173,13 +173,37 @@ class IncrementalSVDBrand : public IncrementalSVD * @brief The matrix U'. U' is not distributed and the entire matrix * exists on each processor. */ + Matrix* d_U; + Matrix* d_U_pre; Matrix* d_Up; + Matrix* d_Up_pre; + Matrix* d_W; + Matrix* d_W_pre; + Matrix* d_Wp; + Matrix* d_Wp_pre; + Matrix* d_Wp_inv; + Vector* d_S_pre; + + Matrix* d_Uq; + Matrix* d_Sq_inv; + Matrix* d_Wq; + Vector* d_p; /** * @brief The tolerance value used to remove small singular values. */ double d_singular_value_tol; + /** + * @brief Maximum number of samples to be used. + */ + int d_max_num_samples; + + /** + * @brief Maximum number of basis vectors to be retained. + */ + int d_max_num_basis; + }; } diff --git a/regression_tests/tests/dmd/heat_conduction_incdmd.sh b/regression_tests/tests/dmd/heat_conduction_incdmd.sh new file mode 100755 index 000000000..40ce21de7 --- /dev/null +++ b/regression_tests/tests/dmd/heat_conduction_incdmd.sh @@ -0,0 +1,8 @@ +#!/bin/bash +source $GITHUB_WORKSPACE/regression_tests/common.sh +CMDS=( + "$COMMAND ./heat_conduction_incdmd --inc" +) +TYPE="DMD" +OFFSET=5 +run_tests
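For reference, the accept/reject logic that heat_conduction_incdmd.cpp builds around the new CAROM::IncrementalDMD class can be condensed as below. This is a minimal illustrative sketch of the --inc code path only, assuming the interface added in this patch (takeSample, train, getNumSamples, predict_dt) and that ConductionOperator from the example is in scope; the helper TimeLoopWithIncrementalDMD and its argument list are hypothetical stand-ins for the setup done in steps 1-11 of the example (option parsing, mesh refinement, visualization, and cleanup are omitted).

#include "mfem.hpp"
#include "algo/IncrementalDMD.h"

// Condensed form of the time loop in heat_conduction_incdmd.cpp (--inc path).
// All arguments are assumed to be set up exactly as in the example above.
void TimeLoopWithIncrementalDMD(ConductionOperator &oper,
                                mfem::ODESolver &ode_solver,
                                mfem::Vector &u,              // true-dof vector
                                CAROM::Options &svd_options,
                                double t_final, double dt,
                                double tol_dmd, int rdim)
{
    double t = 0.0;
    ode_solver.Init(oper);

    CAROM::IncrementalDMD dmd(u.Size(), dt, svd_options, "");
    dmd.takeSample(u.GetData(), t);                  // initial snapshot
    bool dmd_trained = false;
    mfem::Vector u_dmd(u.Size());

    while (t < t_final - dt/2)
    {
        if (dmd_trained)
        {
            // Provisional ROM prediction one dt ahead of the current state.
            CAROM::Vector u_prev(u.GetData(), u.Size(), true, true);
            CAROM::Vector* pred = dmd.predict_dt(&u_prev);
            u_dmd = pred->getData();                 // copy into mfem::Vector
            delete pred;

            // Backward-Euler residual of the prediction, summed over ranks.
            mfem::Vector* r = oper.Residual(dt, u, u_dmd);
            double res = r->Norml2() * r->Norml2();
            delete r;
            MPI_Allreduce(MPI_IN_PLACE, &res, 1, MPI_DOUBLE, MPI_SUM,
                          MPI_COMM_WORLD);

            if (res < tol_dmd) { u = u_dmd; t += dt; }   // accept ROM step
            else { ode_solver.Step(u, t, dt); }          // fall back to FOM
        }
        else
        {
            ode_solver.Step(u, t, dt);               // no trained DMD yet: FOM
        }

        dmd.takeSample(u.GetData(), t);              // record the new snapshot
        if (dmd.getNumSamples() > rdim)
        {
            dmd.train(rdim);                         // incremental SVD update inside
            dmd_trained = true;
        }
        oper.SetParameters(u);                       // refresh K(u) for next step
    }
}

The design point this sketch highlights is that the full-order model remains the fallback: a time step is replaced by the DMD prediction only when its backward-Euler residual is below tol_dmd, so every accepted snapshot stays consistent with the full-order operator.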