    using typename Dune::IterativeSolver<X, X>::domain_type;
    using typename Dune::IterativeSolver<X, X>::range_type;
    using typename Dune::IterativeSolver<X, X>::field_type;
    using typename Dune::IterativeSolver<X, X>::real_type;
    using typename Dune::IterativeSolver<X, X>::scalar_real_type;
    static constexpr auto block_size = domain_type::block_type::dimension;
    using XGPU = Opm::cuistl::CuVector<real_type>;
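    // For example, with X = Dune::BlockVector<Dune::FieldVector<double, 3>>,
    // block_size is 3 and XGPU is Opm::cuistl::CuVector<double>.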

    // Forwards the CPU-side operator, scalar product and preconditioner to the
    // Dune::IterativeSolver base class; the solver that actually runs on the GPU
    // is built in constructSolver().
    SolverAdapter(Operator& op,
                  Dune::ScalarProduct<X>& sp,
                  std::shared_ptr<Dune::Preconditioner<X, X>> prec,
                  scalar_real_type reduction,
                  int maxit,
                  int verbose)
        : Dune::IterativeSolver<X, X>(op, sp, *prec, reduction, maxit, verbose)
        , m_opOnCPUWithMatrix(op)
        , m_matrix(CuSparseMatrix<real_type>::fromMatrix(op.getmat())) // GPU copy of the matrix (factory call assumed)
        , m_underlyingSolver(constructSolver(prec, reduction, maxit, verbose))
    {
    }

    virtual void apply(X& x, X& b, double reduction, Dune::InverseOperatorResult& res) override
    {
        // Allocate the device buffers on first use.
        if (!m_inputBuffer) {
            m_inputBuffer.reset(new XGPU(b.dim()));
            m_outputBuffer.reset(new XGPU(x.dim()));
        }

        // Copy the right-hand side and the initial guess to the GPU ...
        m_inputBuffer->copyFromHost(b);
        m_outputBuffer->copyFromHost(x);

        // ... solve entirely on the GPU ...
        m_underlyingSolver.apply(*m_outputBuffer, *m_inputBuffer, reduction, res);

        // ... and copy the (possibly modified) right-hand side and the solution back.
        m_inputBuffer->copyToHost(b);
        m_outputBuffer->copyToHost(x);
    }

    virtual void apply(X& x, X& b, Dune::InverseOperatorResult& res) override
    {
        // Same host -> device -> host round trip as above, but the underlying
        // solver uses its configured reduction.
        if (!m_inputBuffer) {
            m_inputBuffer.reset(new XGPU(b.dim()));
            m_outputBuffer.reset(new XGPU(x.dim()));
        }

        m_inputBuffer->copyFromHost(b);
        m_outputBuffer->copyFromHost(x);

        m_underlyingSolver.apply(*m_outputBuffer, *m_inputBuffer, res);

        m_inputBuffer->copyToHost(b);
        m_outputBuffer->copyToHost(x);
    }

private:
    // The CPU-side operator; it provides the host matrix and (in parallel runs)
    // the communication object.
    Operator& m_opOnCPUWithMatrix;
    // GPU copy of the operator's matrix.
    CuSparseMatrix<real_type> m_matrix;
    // The solver running on the GPU; UnderlyingSolver is assumed to be a
    // template parameter of this class.
    UnderlyingSolver<XGPU> m_underlyingSolver;
129 "Currently we only support operators of type MatrixAdapter in the CUDA solver. "
130 "Use --matrix-add-well-contributions=true. "
131 "Using WellModelMatrixAdapter with SolverAdapter is not well-defined so it will not work well, or at all.");

        if constexpr (detail::has_communication<Operator>::value) {
            // Parallel (overlapping) case. The compile-time check above and the
            // unwrapping calls below are assumed; the preconditioner must arrive
            // as a CuBlockPreconditioner holding a GPU preconditioner.
            auto precAsHolder = std::dynamic_pointer_cast<PreconditionerHolder<X, X>>(prec);
            if (!precAsHolder) {
                OPM_THROW(std::invalid_argument,
                          "The preconditioner needs to be a CUDA preconditioner (e.g. CuILU0) wrapped in an "
                          "Opm::cuistl::PreconditionerAdapter wrapped in an "
                          "Opm::cuistl::CuBlockPreconditioner. If you are unsure what this means, set "
                          "the preconditioner to 'CUILU0'.");
            }
157 "The preconditioner needs to be a CUDA preconditioner (eg. CuILU0) wrapped in a "
158 "Opm::cuistl::PreconditionerAdapter wrapped in a "
159 "Opm::cuistl::CuBlockPreconditioner. If you are unsure what this means, set "
160 "preconditioner to 'CUILU0'");

            const auto& communication = m_opOnCPUWithMatrix.getCommunication();
            // Mirror the CPU communication on the GPU (construction call assumed).
            auto cudaCommunication = std::make_shared<CudaCommunication>(communication);

            using SchwarzOperator
                = Dune::OverlappingSchwarzOperator<CuSparseMatrix<real_type>, XGPU, XGPU, CudaCommunication>;

            // Constructor arguments of the two wrappers below are assumed.
            auto mpiPreconditioner = std::make_shared<CuBlockPreconditioner<XGPU, XGPU, CudaCommunication>>(
                preconditionerReallyOnGPU, cudaCommunication);
            auto scalarProduct = std::make_shared<Dune::ParallelScalarProduct<XGPU, CudaCommunication>>(
                cudaCommunication, m_opOnCPUWithMatrix.category());

            // gpuOperator is a hypothetical name for the GPU-side Schwarz operator.
            auto gpuOperator = std::make_shared<SchwarzOperator>(m_matrix, *cudaCommunication);
            return UnderlyingSolver<XGPU>(gpuOperator, scalarProduct, mpiPreconditioner, reduction, maxit, verbose);

        } else {
            // Sequential case: the preconditioner only has to hold a GPU preconditioner.
            auto precAsHolder = std::dynamic_pointer_cast<PreconditionerHolder<XGPU, XGPU>>(prec);
            if (!precAsHolder) {
                OPM_THROW(std::invalid_argument,
                          "The preconditioner needs to be a CUDA preconditioner wrapped in an "
                          "Opm::cuistl::PreconditionerHolder (e.g. CuILU0).");
            }
            auto preconditionerOnGPU = precAsHolder->getUnderlyingPreconditioner();

            auto matrixOperator
                = std::make_shared<Dune::MatrixAdapter<CuSparseMatrix<real_type>, XGPU, XGPU>>(m_matrix);
            auto scalarProduct = std::make_shared<Dune::SeqScalarProduct<XGPU>>();
            // Assembling the solver via the standard Dune iterative-solver constructor (assumed).
            return UnderlyingSolver<XGPU>(matrixOperator, scalarProduct, preconditionerOnGPU, reduction, maxit, verbose);
        }
    }

    // Reusable device-side buffers for the right-hand side and the solution.
    std::unique_ptr<XGPU> m_inputBuffer;
    std::unique_ptr<XGPU> m_outputBuffer;
};
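
// Illustrative usage sketch, assuming the class template is
// SolverAdapter<Operator, UnderlyingSolver, X> and a sequential run; the helper
// makeGpuPreconditioner is hypothetical and stands in for building a CUDA
// preconditioner (e.g. CuILU0) wrapped so that it models PreconditionerHolder:
//
//   using Matrix = Dune::BCRSMatrix<Dune::FieldMatrix<double, 3, 3>>;
//   using Vector = Dune::BlockVector<Dune::FieldVector<double, 3>>;
//   using Op     = Dune::MatrixAdapter<Matrix, Vector, Vector>;
//
//   Op op(A);
//   Dune::SeqScalarProduct<Vector> sp;
//   std::shared_ptr<Dune::Preconditioner<Vector, Vector>> prec = makeGpuPreconditioner(A);
//
//   Opm::cuistl::SolverAdapter<Op, Dune::BiCGSTABSolver, Vector>
//       solver(op, sp, prec, /*reduction=*/1e-3, /*maxit=*/200, /*verbose=*/0);
//
//   Dune::InverseOperatorResult result;
//   solver.apply(x, b, result); // x and b stay on the host; GPU copies happen inside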