/[escript]/trunk/paso/src/AMG_Restriction.cpp


Revision 3779
Fri Jan 20 06:18:09 2012 UTC by lgao
Original Path: branches/amg_from_3530/paso/src/AMG_Restriction.c
File MIME type: text/plain
File size: 17937 byte(s)
Pass the tests for Classic Interpolation


//
/*******************************************************
*
* Copyright (c) 2003-2011 by University of Queensland
* Earth Systems Science Computational Center (ESSCC)
* http://www.uq.edu.au/esscc
*
* Primary Business: Queensland, Australia
* Licensed under the Open Software License version 3.0
* http://www.opensource.org/licenses/osl-3.0.php
*
*******************************************************/


/**************************************************************/

/* Paso: defines AMG Restriction Operator */

/**************************************************************/

/* Author: Lin Gao, lgao@uq.edu.au */

/**************************************************************/

#include "Paso.h"
#include "SparseMatrix.h"
#include "PasoUtil.h"
#include "Preconditioner.h"

/**************************************************************

   Methods necessary for AMG preconditioner

   construct the n_C x n Restriction matrix R from the Prolongation matrix P.

   R is the transpose of P.
*/
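
/*
   Overview of the construction implemented below:

   - R->mainBlock is simply the transpose of P->mainBlock, since both refer
     only to rows and columns owned by this rank.

   - The entries of P->col_coupleBlock couple local rows of P to coarse
     columns owned by other ranks. Under the transpose these become rows of
     R owned by those ranks, so the entries (together with their global
     column ids) are packed and sent to the owning ranks, which assemble
     them into their R->col_coupleBlock.

   - Finally a column Connector for R is built whose receiver/sender
     neighbour lists come from the sender/receiver sides of P's column
     Connector, and the input/output distributions of P are swapped for R.
*/
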
#define MY_DEBUG 0
#define MY_DEBUG1 1

Paso_SystemMatrix* Paso_Preconditioner_AMG_getRestriction(Paso_SystemMatrix* P)
{
   Esys_MPIInfo *mpi_info=Esys_MPIInfo_getReference(P->mpi_info);
   Paso_SparseMatrix *main_block=NULL, *couple_block=NULL;
   Paso_SystemMatrix *out=NULL;
   Paso_SystemMatrixPattern *pattern=NULL;
   Paso_Distribution *input_dist=NULL, *output_dist=NULL;
   Paso_SharedComponents *send=NULL, *recv=NULL;
   Paso_Connector *col_connector=NULL, *row_connector=NULL;
   Paso_Coupler *coupler=NULL;
   Paso_Pattern *couple_pattern=NULL;
   const dim_t row_block_size=P->row_block_size;
   const dim_t col_block_size=P->col_block_size;
   const dim_t n=P->mainBlock->numRows;
   const dim_t n_C=P->mainBlock->numCols;
   const dim_t num_threads=omp_get_max_threads();
   index_t size=mpi_info->size, rank=mpi_info->rank, *dist=NULL;
   index_t *ptr=NULL, *idx=NULL, *degree_set=NULL, *offset_set=NULL;
   index_t *send_ptr=NULL, *recv_ptr=NULL, *recv_idx=NULL;
   index_t *temp=NULL, *where_p=NULL;
   index_t num_Pcouple_cols, num_Rcouple_cols, numNeighbors;
   index_t i, j, j_ub, k, p, iptr, iptr_ub, icb, irb;
   index_t block_size, copy_block_size, sum, offset, len, msgs;
   double *val=NULL, *data_set=NULL, *recv_val=NULL;
   index_t *shared=NULL, *offsetInShared=NULL;
   Esys_MPI_rank *neighbor=NULL;
#ifdef ESYS_MPI
   MPI_Request* mpi_requests=NULL;
   MPI_Status* mpi_stati=NULL;
#else
   int *mpi_requests=NULL, *mpi_stati=NULL;
#endif

   if (MY_DEBUG1)
      fprintf(stderr, "Into Restriction %d %d\n", mpi_info->rank, rank);

   /* get main_block of R from the transpose of P->mainBlock */
   main_block = Paso_SparseMatrix_getTranspose(P->mainBlock);
   if (MY_DEBUG1)
      fprintf(stderr, "Rank %d CP1\n", rank);

   /* prepare "ptr" for the col_coupleBlock of R: start by collecting the
      degree_set (associated with "ptr"), offset_set (associated with
      "idx") and data_set (associated with "val") to be sent to other ranks */
   couple_block = P->col_coupleBlock;
   num_Pcouple_cols = couple_block->numCols;
   block_size = P->block_size;
   copy_block_size = block_size * sizeof(double);
   degree_set = TMPMEMALLOC(num_Pcouple_cols, index_t);
   send_ptr = TMPMEMALLOC(num_Pcouple_cols+1, index_t);
   memset(degree_set, 0, sizeof(index_t) * num_Pcouple_cols);
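
   /* degree_set[j] counts how many rows of P have an entry in the j-th
      column of P->col_coupleBlock; under the transpose these counts become
      the row lengths this rank contributes to the rows of R owned by the
      neighbouring ranks. */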
   for (i=0; i<n; i++) {
      iptr_ub = couple_block->pattern->ptr[i+1];
      for (iptr=couple_block->pattern->ptr[i]; iptr<iptr_ub; iptr++) {
         j = couple_block->pattern->index[iptr];
         degree_set[j]++;
      }
   }

   if (MY_DEBUG1)
      fprintf(stderr, "rank %d Pcouple_cols %d block %d col_block %d\n", rank, num_Pcouple_cols, block_size, couple_block->block_size);
   send_ptr[0] = 0;
   for (i=0; i<num_Pcouple_cols; i++) {
      send_ptr[i+1] = send_ptr[i] + degree_set[i];
   }

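   /* send_ptr is the prefix sum of the degrees, i.e. a CSR-style row
      pointer into the packed buffers built below: for the j-th coupled
      column, its entries occupy positions send_ptr[j]..send_ptr[j+1]-1 of
      offset_set (global column ids of R, taken from the global row ids of
      P) and of data_set (the corresponding blocks of values). */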
   memset(degree_set, 0, sizeof(index_t) * num_Pcouple_cols);
   sum = couple_block->pattern->ptr[n];
   offset_set = TMPMEMALLOC(sum, index_t);
   data_set = TMPMEMALLOC(sum * block_size, double);
   offset = P->pattern->output_distribution->first_component[rank];

   if (P->type & MATRIX_FORMAT_DIAGONAL_BLOCK) {
      for (i=0; i<n; i++) {
         iptr_ub = couple_block->pattern->ptr[i+1];
         for (iptr=couple_block->pattern->ptr[i]; iptr<iptr_ub; iptr++) {
            j = couple_block->pattern->index[iptr];
            k = send_ptr[j] + degree_set[j];
            offset_set[k] = i + offset; /* now we have the global id for row i,
                                           which will be used as col index of R */
            memcpy(&(data_set[k*block_size]), &(couple_block->val[iptr*block_size]), copy_block_size);
            degree_set[j]++;
         }
      }
   } else {
      for (i=0; i<n; i++) {
         iptr_ub = couple_block->pattern->ptr[i+1];
         for (iptr=couple_block->pattern->ptr[i]; iptr<iptr_ub; iptr++) {
            j = couple_block->pattern->index[iptr];
            k = send_ptr[j] + degree_set[j];
            if (MY_DEBUG && rank == 3 && k <= 5) fprintf(stderr, "offset_set k %d i %d offset %d j %d send_ptr[j] %d degree_set[j] %d\n", k, i, offset, j, send_ptr[j], degree_set[j]);
            offset_set[k] = i + offset; /* now we have the global id for row i,
                                           which will be used as col index of R */
            // memcpy(&(data_set[k*block_size]), &(couple_block->val[iptr*block_size]), copy_block_size);
            /* copy the block into data_set, transposing it on the fly */
            for (irb=0; irb<row_block_size; irb++) {
               for (icb=0; icb<col_block_size; icb++) {
                  if (MY_DEBUG1 && iptr*block_size+irb+row_block_size*icb >= couple_block->len)
                     fprintf(stderr, "rank %d ACCESS OVERFLOW %d(iptr %d block %d irb %d row %d icb %d) >= %d (%d, %d)\n", rank, iptr*block_size+irb+row_block_size*icb, iptr, block_size, irb, row_block_size, icb, couple_block->len, couple_block->pattern->len, couple_block->pattern->ptr[n]);
                  data_set[k*block_size+icb+col_block_size*irb] = couple_block->val[iptr*block_size+irb+row_block_size*icb];
               }
            }
            degree_set[j]++;
         }
      }
   }

#ifdef ESYS_MPI
   mpi_requests=TMPMEMALLOC(size*4, MPI_Request);
   mpi_stati=TMPMEMALLOC(size*4, MPI_Status);
#else
   mpi_requests=TMPMEMALLOC(size*4, int);
   mpi_stati=TMPMEMALLOC(size*4, int);
#endif

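   /* Note on the communication below: because R is the transpose of P, the
      roles of P's column Connector are reversed. What this rank "sends"
      for P (send = P->col_coupler->connector->send) identifies the coarse
      columns it owns that other ranks refer to, so it is the receiving
      side when assembling R->col_coupleBlock, while P's receiving side
      (recv) is where the packed entries are sent to. */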
   /* send/receive degree_set to build the "ptr" for R->col_coupleBlock */
   msgs = 0;
   send = P->col_coupler->connector->send;
   if (MY_DEBUG) {
      int my_sum = send->offsetInShared[1];
      char *str1, *str2;
      str1 = TMPMEMALLOC(my_sum*20+100, char);
      str2 = TMPMEMALLOC(20, char);
      sprintf(str1, "rank %d send->shared[%d] = \n", rank, my_sum);
      for (i=0; i<my_sum; i++) {
         if (send->shared[i] < 0 || send->shared[i] >= n_C) {
            sprintf(str2, "(%d %d), ", i, send->shared[i]);
            strcat(str1, str2);
         }
      }
      fprintf(stderr, "%s\n", str1);
      TMPMEMFREE(str1);
      TMPMEMFREE(str2);
   }
   recv = P->col_coupler->connector->recv;
   recv_ptr = TMPMEMALLOC(send->offsetInShared[send->numNeighbors], index_t);
   for (p=0; p<send->numNeighbors; p++) {
      i = send->offsetInShared[p];
      j = send->offsetInShared[p+1];
      k = j - i;
      if (k > 0) {
         MPI_Irecv(&(recv_ptr[i]), k, MPI_INT, send->neighbor[p],
                   mpi_info->msg_tag_counter+send->neighbor[p],
                   mpi_info->comm, &mpi_requests[msgs]);
         msgs++;
      }
   }

   for (p=0; p<recv->numNeighbors; p++) {
      i = recv->offsetInShared[p];
      j = recv->offsetInShared[p+1];
      k = j - i;
      if (k > 0) {
         if (MY_DEBUG && rank == 3 && recv->neighbor[p] == 4)
            fprintf(stderr, "3 Send degree_set %d to 4 (%d) offset %d p %d\n", k, degree_set[i], i, p);
         MPI_Issend(&(degree_set[i]), k, MPI_INT, recv->neighbor[p],
                    mpi_info->msg_tag_counter+rank, mpi_info->comm,
                    &mpi_requests[msgs]);
         msgs++;
      }
   }

   MPI_Waitall(msgs, mpi_requests, mpi_stati);
   mpi_info->msg_tag_counter += size;
   if (MY_DEBUG1)
      fprintf(stderr, "rank %d Waitall(1)\n", rank);

   TMPMEMFREE(degree_set);
   degree_set = TMPMEMALLOC(send->numNeighbors, index_t);
   memset(degree_set, 0, sizeof(index_t)*send->numNeighbors);
   for (p=0, sum=0; p<send->numNeighbors; p++) {
      iptr_ub = send->offsetInShared[p+1];
      for (iptr = send->offsetInShared[p]; iptr < iptr_ub; iptr++){
         degree_set[p] += recv_ptr[iptr];
      }
      sum += degree_set[p];
   }
   if (MY_DEBUG && rank == 4)
      fprintf(stderr, "rank %d degree_set %d (%d %d)\n", rank, sum, degree_set[0], degree_set[1]);

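   /* degree_set is now reused per neighbour: degree_set[p] is the total
      number of index/value entries this rank will receive from neighbour p,
      and sum is the total number of non-zero blocks that will end up in
      R->col_coupleBlock. */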
   /* send/receive offset_set and data_set to build the "idx" and "val"
      for R->col_coupleBlock */
   msgs = 0;
   recv_idx = TMPMEMALLOC(sum, index_t);
   recv_val = TMPMEMALLOC(sum * block_size, double);
   for (p=0, offset=0; p<send->numNeighbors; p++) {
      if (degree_set[p]) {
         if (MY_DEBUG && rank == 4)
            fprintf(stderr, "4 Recv %d from %d\n", degree_set[p], send->neighbor[p]);
         MPI_Irecv(&(recv_idx[offset]), degree_set[p], MPI_INT,
                   send->neighbor[p], mpi_info->msg_tag_counter+send->neighbor[p],
                   mpi_info->comm, &mpi_requests[msgs]);
         msgs++;
         MPI_Irecv(&(recv_val[offset*block_size]), degree_set[p] * block_size,
                   MPI_DOUBLE, send->neighbor[p],
                   mpi_info->msg_tag_counter+send->neighbor[p]+size,
                   mpi_info->comm, &mpi_requests[msgs]);
         offset += degree_set[p];
         msgs++;
      }
   }

   for (p=0; p<recv->numNeighbors; p++) {
      i = recv->offsetInShared[p];
      j = recv->offsetInShared[p+1];
      k = send_ptr[j] - send_ptr[i];
      if (k > 0) {
         if (MY_DEBUG && rank == 3 && recv->neighbor[p] == 4)
            fprintf(stderr, "3 Send %d to %d (%d %d) offset %d\n", k, recv->neighbor[p], offset_set[i], offset_set[i+1], i);
         MPI_Issend(&(offset_set[send_ptr[i]]), k, MPI_INT,
                    recv->neighbor[p], mpi_info->msg_tag_counter+rank,
                    mpi_info->comm, &mpi_requests[msgs]);
         msgs++;
         MPI_Issend(&(data_set[send_ptr[i]*block_size]), k*block_size, MPI_DOUBLE,
                    recv->neighbor[p], mpi_info->msg_tag_counter+rank+size,
                    mpi_info->comm, &mpi_requests[msgs]);
         msgs++;
      }
   }

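   /* while the messages are in flight, build temp: the prefix sum of
      recv_ptr, i.e. the starting position in recv_idx / recv_val of the
      entries belonging to each shared row of R. */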
   len = send->offsetInShared[send->numNeighbors];
   temp = TMPMEMALLOC(len, index_t);
   memset(temp, 0, sizeof(index_t)*len);
   for (p=1; p<len; p++) {
      temp[p] = temp[p-1] + recv_ptr[p-1];
   }

   MPI_Waitall(msgs, mpi_requests, mpi_stati);
   mpi_info->msg_tag_counter += 2*size;
   TMPMEMFREE(degree_set);
   TMPMEMFREE(offset_set);
   TMPMEMFREE(data_set);
   TMPMEMFREE(send_ptr);
   TMPMEMFREE(mpi_requests);
   TMPMEMFREE(mpi_stati);
   if (MY_DEBUG && rank == 4){
      char *str1, *str2;
      int my_sum, my_i;
      my_sum = 2;
      str1 = TMPMEMALLOC(my_sum*100+100, char);
      str2 = TMPMEMALLOC(100, char);
      sprintf(str1, "recv_idx[%d] = (", my_sum);
      for (my_i=0; my_i<my_sum; my_i++) {
         sprintf(str2, "%d ", recv_idx[my_i]);
         strcat(str1, str2);
      }
      fprintf(stderr, "%s)\n", str1);
      TMPMEMFREE(str1);
      TMPMEMFREE(str2);
   }

   if (MY_DEBUG1)
      fprintf(stderr, "rank %d Construct col_coupleBlock for R: %d %d \n", rank, n_C, sum);

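   /* Each received entry belongs to a coarse row of R owned by this rank;
      send->shared maps each shared slot j to that local row id. For every
      local coarse row i of R (0 <= i < n_C), the loop below gathers the
      received segments whose shared id equals i (at most one per
      neighbour), so the rows of R->col_coupleBlock come out in local row
      order. */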
   /* construct "ptr", "idx" and "val" for R->col_coupleBlock */
   ptr = MEMALLOC(n_C + 1, index_t);
   idx = MEMALLOC(sum, index_t);
   val = MEMALLOC(sum*block_size, double);
   ptr[0] = 0;
   for (i=0; i<n_C; i++) {
      icb = 0;
      for (p=0; p<send->numNeighbors; p++) {
         k = send->offsetInShared[p+1];
         for (j=send->offsetInShared[p]; j<k; j++) {
            if (MY_DEBUG)
               fprintf(stderr, "rank %d send %d, i%d len%d\n", rank, send->shared[j], i, recv_ptr[j]);
            //if (send->shared[j] < 0 || send->shared[j] >= n_C)
            //   fprintf(stderr, "rank %d shared ele out of range %d %d %d\n", rank, j, send->shared[j], n_C);
            if (send->shared[j] == i) {
               offset = ptr[i] + icb;
               len = recv_ptr[j];
               memcpy(&idx[offset], &recv_idx[temp[j]], sizeof(index_t)*len);
               if (temp[j] + len > sum)
                  fprintf(stderr, "rank %d ACCESS to recv_idx overflowed i%d j%d offset%d t_j%d len %d\n", rank, i, j, offset, temp[j], len);
               memcpy(&val[offset*block_size], &recv_val[temp[j]*block_size], sizeof(double)*len*block_size);
               icb += len;
               if (MY_DEBUG)
                  fprintf(stderr, "rank %d send %d, i%d len%d\n", rank, send->shared[j], i, recv_ptr[j]);
               if (MY_DEBUG) fprintf(stderr, "rank %d offset %d len %d block %d\n", rank, offset, len, block_size);
               break;
            }
         }
      }
      ptr[i+1] = ptr[i] + icb;
   }
   sum = ptr[n_C];
   TMPMEMFREE(temp);
   TMPMEMFREE(recv_ptr);
   TMPMEMFREE(recv_val);

   if (MY_DEBUG1)
      fprintf(stderr, "rank %d col_coupleBlock non-zero: %d\n", rank, sum);

   /* count the number of cols (num_Rcouple_cols) in R->col_coupleBlock,
      and convert the global id in "idx" into local id */
   num_Rcouple_cols = 0;
   if (sum) {
#ifdef USE_QSORTG
      qsortG(recv_idx, (size_t)sum, sizeof(index_t), Paso_comparIndex);
#else
      qsort(recv_idx, (size_t)sum, sizeof(index_t), Paso_comparIndex);
#endif
      num_Rcouple_cols = 1;
      i = recv_idx[0];
      for (j=1; j<sum; j++) {
         if (recv_idx[j] > i) {
            i = recv_idx[j];
            recv_idx[num_Rcouple_cols] = i;
            num_Rcouple_cols++;
         }
      }
      for (i=0; i<sum; i++) {
         where_p = (index_t *)bsearch(&(idx[i]), recv_idx, num_Rcouple_cols,
                                      sizeof(index_t), Paso_comparIndex);
         if (where_p == NULL)
            fprintf(stderr, "rank %d index out of range, idx[%d]=%d\n", rank, i, idx[i]);
         idx[i] = (index_t)(where_p - recv_idx);
         if (idx[i] >= num_Rcouple_cols)
            fprintf(stderr, "rank %d index out of range (%d %d %d)\n", rank, i, idx[i], num_Rcouple_cols);
      }
   }

   if (MY_DEBUG1)
      fprintf(stderr, "rank %d Count num_Rcouple_cols %d\n", rank, num_Rcouple_cols);

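   /* At this point recv_idx[0..num_Rcouple_cols-1] holds the sorted,
      distinct global column ids appearing in R->col_coupleBlock, and idx
      has been rewritten to local column indices into that list. */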
   /* prepare the receiver for the col_connector */
   dist = P->pattern->output_distribution->first_component;
   offsetInShared = TMPMEMALLOC(size+1, index_t);
   shared = TMPMEMALLOC(num_Rcouple_cols, index_t);
   numNeighbors = send->numNeighbors;
   neighbor = send->neighbor;
   offsetInShared[0] = 0;
   if (MY_DEBUG && rank == 4){
      char *str1, *str2;
      int sum, my_i;
      sum = num_Rcouple_cols;
      str1 = TMPMEMALLOC((sum+size)*100+100, char);
      str2 = TMPMEMALLOC(100, char);
      sprintf(str1, "rank %d distribution[%d] = (", rank, size);
      for (my_i=0; my_i<size; my_i++) {
         sprintf(str2, "%d ", dist[my_i+1]);
         strcat(str1, str2);
      }
      fprintf(stderr, "%s)\n", str1);
      sprintf(str1, "rank %d recv_idx[%d] = (", rank, sum);
      for (my_i=0; my_i<sum; my_i++) {
         sprintf(str2, "%d ", recv_idx[my_i]);
         strcat(str1, str2);
      }
      fprintf(stderr, "%s)\n", str1);
      TMPMEMFREE(str1);
      TMPMEMFREE(str2);
   }
   for (i=0, p=0; i<num_Rcouple_cols; i++) {
      offset = dist[neighbor[p] + 1];
      /* col i is received from rank neighbor[p] while its global id is still
         smaller than "offset"; otherwise it is received from rank neighbor[p+1] */
      if (recv_idx[i] >= offset) {
         p++;
         offsetInShared[p] = i;
      }
      shared[i] = i + n; /* n is the number of cols in R->mainBlock */
   }
   offsetInShared[numNeighbors] = num_Rcouple_cols;
   recv = Paso_SharedComponents_alloc(n, numNeighbors,
                                      neighbor, shared, offsetInShared, 1, 0, mpi_info);
   TMPMEMFREE(recv_idx);

   if (MY_DEBUG1)
      fprintf(stderr, "rank %d Receiver!!\n", rank);

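   /* The sender side of the col_connector lists, for every neighbour (the
      ranks this rank receives coupled coarse columns from for P), the
      local columns of R->mainBlock (i.e. the local rows of P) that have a
      coupling into that neighbour's range and therefore have to be made
      available to it. */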
   /* prepare the sender for the col_connector */
   TMPMEMFREE(shared);
   numNeighbors = P->col_coupler->connector->recv->numNeighbors;
   neighbor = P->col_coupler->connector->recv->neighbor;
   shared = TMPMEMALLOC(n * numNeighbors, index_t);
   couple_pattern = P->col_coupleBlock->pattern;
   sum=0;
   offsetInShared[0] = 0;
   for (p=0; p<numNeighbors; p++) {
      j = P->col_coupler->connector->recv->offsetInShared[p];
      j_ub = P->col_coupler->connector->recv->offsetInShared[p+1];
      if (MY_DEBUG && rank == 3 && P->col_coupler->connector->recv->neighbor[p] == 4) {
         fprintf(stderr, "3sendto4 with P=%d\n", p);
      }
      for (i=0; i<n; i++) {
         iptr = couple_pattern->ptr[i];
         iptr_ub = couple_pattern->ptr[i+1];
         for (; iptr<iptr_ub; iptr++) {
            k = couple_pattern->index[iptr];
            if (k >= j && k < j_ub) {
               if (MY_DEBUG && rank == 3 && P->col_coupler->connector->recv->neighbor[p] == 4) {
                  fprintf(stderr, "send_idx[%d]=k%d i%d (%d, %d)\n", sum, k, i, j, j_ub);
               }
               shared[sum] = i;
               sum++;
               break;
            }
         }
      }
      offsetInShared[p+1] = sum;
   }
   send = Paso_SharedComponents_alloc(n, numNeighbors,
                                      neighbor, shared, offsetInShared, 1, 0, mpi_info);
   if (MY_DEBUG1)
      fprintf(stderr, "rank %d Sender!! %d %d\n", rank, n_C, num_Rcouple_cols);

   /* build the col_connector based on sender and receiver */
   col_connector = Paso_Connector_alloc(send, recv);
   Paso_SharedComponents_free(recv);
   Paso_SharedComponents_free(send);
   TMPMEMFREE(offsetInShared);
   TMPMEMFREE(shared);

   couple_pattern = Paso_Pattern_alloc(MATRIX_FORMAT_DEFAULT, n_C,
                                       num_Rcouple_cols, ptr, idx);

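   /* Since R is the transpose of P, the distributions swap roles: the row
      (output) distribution of R is P's column (input) distribution over
      the coarse unknowns, and the column (input) distribution of R is P's
      row (output) distribution over the fine unknowns. Note that "dist"
      still points at P's output distribution here. */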
   input_dist = Paso_Distribution_alloc(mpi_info, dist, 1, 0);
   dist = P->pattern->input_distribution->first_component;
   output_dist = Paso_Distribution_alloc(mpi_info, dist, 1, 0);

   if (MY_DEBUG)
      fprintf(stderr, "rank %d Before alloc System Pattern for Restriction %d %s\n", rank, Esys_getErrorType(), Esys_getErrorMessage());

   /* now we need to create the System Matrix
      TO BE FIXED: at this stage, only col_couple_pattern and col_connector
      are constructed for the Restriction matrix R. To complete it,
      row_couple_pattern and row_connector need to be constructed as well */
   if (Esys_noError()) {
      pattern = Paso_SystemMatrixPattern_alloc(MATRIX_FORMAT_DEFAULT,
                   output_dist, input_dist, main_block->pattern, couple_pattern,
                   couple_pattern, col_connector, col_connector);
      out = Paso_SystemMatrix_alloc(MATRIX_FORMAT_DIAGONAL_BLOCK, pattern,
                   row_block_size, col_block_size, FALSE);
   }

   /* now fill in the matrix */
   if (Esys_noError()) {
      memcpy(out->mainBlock->val, main_block->val,
             main_block->len * sizeof(double));
      memcpy(out->col_coupleBlock->val, val,
             out->col_coupleBlock->len * sizeof(double));
   }
   MEMFREE(val);

   if (MY_DEBUG)
      fprintf(stderr, "rank %d After alloc System Pattern for Restriction\n", rank);

   /* clean up */
   Paso_SparseMatrix_free(main_block);
   Paso_SystemMatrixPattern_free(pattern);
   Paso_Pattern_free(couple_pattern);
   Paso_Connector_free(col_connector);
   Paso_Distribution_free(output_dist);
   Paso_Distribution_free(input_dist);

   if (Esys_noError()) {
      return out;
   } else {
      Paso_SystemMatrix_free(out);
      return NULL;
   }
}
