#ifndef SELDON_FILE_DISTRIBUTED_CHOLESKY_SOLVER_CXX
#ifdef SELDON_WITH_MPI
    // default initialisation: sequential behaviour, no rows shared with other processors
    nb_unknowns_scal_ = 1;
    comm_ = MPI_COMM_SELF;
    ProcSharingRows_ = NULL;
    SharingRowNumbers_ = NULL;
#endif
  //! factorisation of a sequential matrix
  template<class T>
  template<class Prop, class Storage, class Allocator>

#ifdef SELDON_WITH_MPI
  //! factorisation of a distributed matrix (only the Pastix interface is available)
  template<class T>
  template<class Prop0, class Storage0, class Allocator0>

    // number of processors and local rank in the communicator comm of A
    int nb_proc; MPI_Comm_size(comm, &nb_proc);
    int rank_proc; MPI_Comm_rank(comm, &rank_proc);

    // the distributed Cholesky factorisation is only interfaced with Pastix
    if (this->type_solver != SparseCholeskySolver<T>::PASTIX)
      cout << "Only available for Pastix" << endl;

    // Bstore can receive a copy of A when the original matrix must be kept
    DistributedMatrix<T, Prop0, Storage0, Allocator0> Bstore;
    DistributedMatrix<T, Prop0, Storage0, Allocator0>* B;

#ifdef SELDON_WITH_PASTIX
    // the Pastix solver is asked to compute a Cholesky factorisation (L L^T)
    MatrixPastix<T>& mat_pastix = dynamic_cast<MatrixPastix<T>& >(*this->solver);
    mat_pastix.SetCholeskyFacto(true);
#else
    cout << "Recompile Seldon with Pastix" << endl;
#endif

    bool sym_pattern = true, reorder_num = false;

    if (this->solver->UseInteger8())
      {
        // the solver expects 64-bit integers
        Vector<int64_t> Ptr, IndRow;
        AssembleDistributed(*B, sym, comm, global_col_numbers,
                            Ptr, IndRow, Val, sym_pattern, reorder_num);
        this->solver->FactorizeDistributedMatrix(comm, Ptr, IndRow, Val,
                                                 global_col_numbers, sym_matrix, reorder_num);
      }
    else
      {
        // the solver works with long/int integers
        Vector<long> Ptr; Vector<int> IndRow;
        AssembleDistributed(*B, sym, comm, global_col_numbers,
                            Ptr, IndRow, Val, sym_pattern, reorder_num);
        this->solver->FactorizeDistributedMatrix(comm, Ptr, IndRow, Val,
                                                 global_col_numbers, sym_matrix, reorder_num);
      }
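
    /* Usage sketch (illustration only, not taken from this file; it assumes the
       Pastix interface is compiled in and that the distributed matrix A is
       already filled; SelectDirectSolver is a hypothetical way of requesting Pastix):

         DistributedCholeskySolver<double> chol;
         chol.SelectDirectSolver(SparseCholeskySolver<double>::PASTIX);
         chol.Factorize(A, true);   // true: A is kept, a copy is assembled instead
    */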

  //! adds the values of rows shared between processors (overlapped dofs are summed)
  template<class T> template<class T2>
  void DistributedCholeskySolver<T>::AssembleVec(Vector<T2>& X) const
  {
    AssembleVector(X, MPI_SUM, *ProcSharingRows_, *SharingRowNumbers_,
                   comm_, nodl_scalar_, nb_unknowns_scal_, 20);
  }

  //! assembles each column of a multiple right-hand side matrix
  template<class T> template<class T2>
  void DistributedCholeskySolver<T>
  ::AssembleVec(Matrix<T2, General, ColMajor>& A) const
  {
    int nrhs = A.GetN();
    Vector<T2> X;
    for (int k = 0; k < nrhs; k++)
      {
        // X is a view on the k-th column of A (ColMajor => contiguous storage)
        X.SetData(A.GetM(), &A(0, k));
        AssembleVector(X, MPI_SUM, *ProcSharingRows_, *SharingRowNumbers_,
                       comm_, nodl_scalar_, nb_unknowns_scal_, 21);
        // the view is released so that X does not free A's memory
        X.Nullify();
      }
  }
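
  /* Illustration (assumption, not in the original): if a degree of freedom is
     shared by two processors, AssembleVector with MPI_SUM leaves the sum of the
     two local contributions on both of them, e.g.

       proc 0 : X(i) = 1.5     proc 1 : X(j) = 2.0   (same global dof)
       after assembly : X(i) = X(j) = 3.5 on both processors
  */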

#endif

  //! solves L x = b or L^T x = b, the solution being distributed over the processors
  template<class T> template<class T1>
  void DistributedCholeskySolver<T>
  ::Solve(const SeldonTranspose& trans, Vector<T1>& x_solution, bool assemble)

#ifdef SELDON_WITH_MPI
    MPI_Comm& comm = comm_;
    int nb_proc; MPI_Comm_size(comm, &nb_proc);

    // the right-hand side is extracted (rows shared with other processors are removed)
    int n = local_col_numbers.GetM();
    Vector<T1> x_sol_extract(n);
    for (int i = 0; i < local_col_numbers.GetM(); i++)
      x_sol_extract(i) = x_solution(local_col_numbers(i));

#ifdef SELDON_WITH_PASTIX
    MatrixPastix<T>& mat_pastix = dynamic_cast<MatrixPastix<T>& >(*this->solver);

    mat_pastix.SolveDistributed(comm, trans, x_sol_extract, global_col_numbers);
#else
    cout << "Recompile Seldon with Pastix" << endl;
#endif

    // the local solution is copied back into the global vector
    for (int i = 0; i < local_col_numbers.GetM(); i++)
      x_solution(local_col_numbers(i)) = x_sol_extract(i);

    // contributions of rows shared between processors are added
    if (assemble)
      this->AssembleVec(x_solution);
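
    /* Sketch (assumption, not in the original): applying the full inverse of
       A = L L^T with the two triangular solves, once Factorize has been called
       (chol and b are illustrative names):

         chol.Solve(SeldonNoTrans, b, true);   // b <- L^{-1} b
         chol.Solve(SeldonTrans, b, true);     // b <- L^{-T} b, so b now holds A^{-1} b
    */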

  //! computes x = L b or x = L^T b (multiplication by the Cholesky factor)
  template<class T> template<class T1>
  void DistributedCholeskySolver<T>
  ::Mlt(const SeldonTranspose& trans, Vector<T1>& x_solution, bool assemble)

#ifdef SELDON_WITH_MPI
    MPI_Comm& comm = comm_;
    int nb_proc; MPI_Comm_size(comm, &nb_proc);

    // the vector to multiply is extracted (rows shared with other processors are removed)
    int n = local_col_numbers.GetM();
    Vector<T1> x_sol_extract(n);
    for (int i = 0; i < local_col_numbers.GetM(); i++)
      x_sol_extract(i) = x_solution(local_col_numbers(i));

#ifdef SELDON_WITH_PASTIX
    MatrixPastix<T>& mat_pastix = dynamic_cast<MatrixPastix<T>& >(*this->solver);

    mat_pastix.MltDistributed(comm, trans, x_sol_extract, global_col_numbers);
#else
    cout << "Recompile Seldon with Pastix" << endl;
#endif

    // the local result is copied back into the global vector
    for (int i = 0; i < local_col_numbers.GetM(); i++)
      x_solution(local_col_numbers(i)) = x_sol_extract(i);

    // contributions of rows shared between processors are added
    if (assemble)
      this->AssembleVec(x_solution);
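
    /* Sketch (assumption, not in the original): Mlt applies the factor itself while
       Solve applies its inverse, so the two calls cancel each other; in a run on a
       single processor y should match x up to round-off:

         Vector<double> y(x);
         chol.Mlt(SeldonNoTrans, y, true);     // y <- L x
         chol.Solve(SeldonNoTrans, y, true);   // y <- L^{-1} y, close to x again
    */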
#define SELDON_FILE_DISTRIBUTED_CHOLESKY_SOLVER_CXX
#endif