Diff of /trunk/escript/src/DataLazy.cpp

revision 1889 by jfenwick, Thu Oct 16 05:57:09 2008 UTC
revision 1898 by jfenwick, Mon Oct 20 01:20:18 2008 UTC
# Line 395  DataLazy::collapse()
   m_op=IDENTITY;
 }
 
-const double*
-DataLazy::resolveUnary(ValueType& v,int sampleNo,  size_t offset) const
+DataTypes::ValueType*
+DataLazy::resolveUnary(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const
 {
    // we assume that any collapsing has been done before we get here
    // since we only have one argument we don't need to think about only
# Line 405  DataLazy::resolveUnary(ValueType& v,int
   {
     throw DataException("Programmer error - resolveUnary should only be called on expanded Data.");
   }
-  const double* left=m_left->resolveSample(v,sampleNo,offset);
+  const ValueType* vleft=m_left->resolveSample(v,offset,sampleNo,roffset);
+  const double* left=&((*vleft)[roffset]);
   double* result=&(v[offset]);
+  roffset=offset;
   switch (m_op)
   {
     case SIN:
# Line 509 (v.1889) / Line 511 (v.1898)  DataLazy::resolveUnary
     default:
     throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");
   }
-  return result;
+  return &v;
 }
 
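Note: every case in the switch above hands the whole sample to tensor_unary_operation. A minimal sketch of that helper, reconstructed from the call sites in this diff (the real escript template may differ in details):

    // Sketch: apply a scalar functor to each of the 'size' doubles in a sample.
    template <class UnaryFunction>
    void tensor_unary_operation(const int size, const double* argument,
                                double* result, UnaryFunction operation)
    {
      for (int i = 0; i < size; ++i)
      {
        result[i] = operation(argument[i]);   // e.g. ::sin for case SIN
      }
    }
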
+// const double*
+// DataLazy::resolveUnary(ValueType& v,int sampleNo,  size_t offset) const
+// {
+//  // we assume that any collapsing has been done before we get here
+//  // since we only have one argument we don't need to think about only
+//  // processing single points.
+//   if (m_readytype!='E')
+//   {
+//     throw DataException("Programmer error - resolveUnary should only be called on expanded Data.");
+//   }
+//   const double* left=m_left->resolveSample(v,sampleNo,offset);
+//   double* result=&(v[offset]);
+//   switch (m_op)
+//   {
+//     case SIN:
+//  tensor_unary_operation(m_samplesize, left, result, ::sin);
+//  break;
+//     case COS:
+//  tensor_unary_operation(m_samplesize, left, result, ::cos);
+//  break;
+//     case TAN:
+//  tensor_unary_operation(m_samplesize, left, result, ::tan);
+//  break;
+//     case ASIN:
+//  tensor_unary_operation(m_samplesize, left, result, ::asin);
+//  break;
+//     case ACOS:
+//  tensor_unary_operation(m_samplesize, left, result, ::acos);
+//  break;
+//     case ATAN:
+//  tensor_unary_operation(m_samplesize, left, result, ::atan);
+//  break;
+//     case SINH:
+//  tensor_unary_operation(m_samplesize, left, result, ::sinh);
+//  break;
+//     case COSH:
+//  tensor_unary_operation(m_samplesize, left, result, ::cosh);
+//  break;
+//     case TANH:
+//  tensor_unary_operation(m_samplesize, left, result, ::tanh);
+//  break;
+//     case ERF:
+// #ifdef _WIN32
+//  throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");
+// #else
+//  tensor_unary_operation(m_samplesize, left, result, ::erf);
+//  break;
+// #endif
+//    case ASINH:
+// #ifdef _WIN32
+//  tensor_unary_operation(m_samplesize, left, result, escript::asinh_substitute);
+// #else
+//  tensor_unary_operation(m_samplesize, left, result, ::asinh);
+// #endif
+//  break;
+//    case ACOSH:
+// #ifdef _WIN32
+//  tensor_unary_operation(m_samplesize, left, result, escript::acosh_substitute);
+// #else
+//  tensor_unary_operation(m_samplesize, left, result, ::acosh);
+// #endif
+//  break;
+//    case ATANH:
+// #ifdef _WIN32
+//  tensor_unary_operation(m_samplesize, left, result, escript::atanh_substitute);
+// #else
+//  tensor_unary_operation(m_samplesize, left, result, ::atanh);
+// #endif
+//  break;
+//     case LOG10:
+//  tensor_unary_operation(m_samplesize, left, result, ::log10);
+//  break;
+//     case LOG:
+//  tensor_unary_operation(m_samplesize, left, result, ::log);
+//  break;
+//     case SIGN:
+//  tensor_unary_operation(m_samplesize, left, result, escript::fsign);
+//  break;
+//     case ABS:
+//  tensor_unary_operation(m_samplesize, left, result, ::fabs);
+//  break;
+//     case NEG:
+//  tensor_unary_operation(m_samplesize, left, result, negate<double>());
+//  break;
+//     case POS:
+//  // it doesn't mean anything for delayed.
+//  // it will just trigger a deep copy of the lazy object
+//  throw DataException("Programmer error - POS not supported for lazy data.");
+//  break;
+//     case EXP:
+//  tensor_unary_operation(m_samplesize, left, result, ::exp);
+//  break;
+//     case SQRT:
+//  tensor_unary_operation(m_samplesize, left, result, ::sqrt);
+//  break;
+//     case RECIP:
+//  tensor_unary_operation(m_samplesize, left, result, bind1st(divides<double>(),1.));
+//  break;
+//     case GZ:
+//  tensor_unary_operation(m_samplesize, left, result, bind2nd(greater<double>(),0.0));
+//  break;
+//     case LZ:
+//  tensor_unary_operation(m_samplesize, left, result, bind2nd(less<double>(),0.0));
+//  break;
+//     case GEZ:
+//  tensor_unary_operation(m_samplesize, left, result, bind2nd(greater_equal<double>(),0.0));
+//  break;
+//     case LEZ:
+//  tensor_unary_operation(m_samplesize, left, result, bind2nd(less_equal<double>(),0.0));
+//  break;
+//
+//     default:
+//  throw DataException("Programmer error - resolveUnary can not resolve operator "+opToString(m_op)+".");
+//   }
+//   return result;
+// }
 
 #define PROC_OP(X) \
     for (int i=0;i<steps;++i,resultp+=getNoValues()) \
     { \
# Line 525 (v.1889) / Line 645 (v.1898)  cout << right[0] << right[1] << right[2]
 cout << "Result=" << result << " " << result[0] << result[1] << result[2] << endl; \
     }
 
-const double*
-DataLazy::resolveBinary(ValueType& v,int sampleNo,  size_t offset) const
+DataTypes::ValueType*
+DataLazy::resolveBinary(ValueType& v,  size_t offset ,int sampleNo, size_t& roffset) const
 {
    // again we assume that all collapsing has already been done
    // so we have at least one expanded child.
# Line 534 (v.1889) / Line 654 (v.1898)  DataLazy::resolveBinary(ValueType& v,int
 
 cout << "Resolve binary: " << toString() << endl;
 
-  const double* left=m_left->resolveSample(v,sampleNo,offset);
-cout << "Done Left " << left[0] << left[1] << left[2] << endl;
-  const double* right=m_right->resolveSample(v,sampleNo,offset);
-cout << "Done Right"  << right[0] << right[1] << right[2] << endl;
-    // now we need to know which args are expanded
+  size_t lroffset=0, rroffset=0;
+
   bool leftExp=(m_left->m_readytype=='E');
   bool rightExp=(m_right->m_readytype=='E');
   bool bigloops=((leftExp && rightExp) || (!leftExp && !rightExp));  // is processing in single step
-  int steps=(bigloops?1:getNumSamples());
+  int steps=(bigloops?1:getNumDPPSample());
   size_t chunksize=(bigloops? m_samplesize : getNoValues());
-  int leftStep=(rightExp? getNoValues() : 0);
-  int rightStep=(leftExp? getNoValues() : 0);
+  int leftStep=((leftExp && !rightExp)? getNoValues() : 0);
+  int rightStep=((rightExp && !leftExp)? getNoValues() : 0);
 
+  const ValueType* left=m_left->resolveSample(v,offset,sampleNo,lroffset);
+  const ValueType* right=m_right->resolveSample(v,offset,sampleNo,rroffset);
+    // now we need to know which args are expanded
 cout << "left=" << left << " right=" << right << endl;
-  double* result=&(v[offset]);
-  double* resultp=result;
+cout << "(Length) l=" << left->size() << " r=" << right->size() << " res=" << v.size() << endl;
+  double* resultp=&(v[offset]);
   switch(m_op)
   {
     case ADD:
-    PROC_OP(plus<double>())
+    for (int i=0;i<steps;++i,resultp+=getNoValues())
+    {
+cerr << "Step#" << i << " chunk=" << chunksize << endl;
+cerr << left << "[" << lroffset << "] " << right << "[" << rroffset << "]" << endl;
+       tensor_binary_operation(chunksize, &((*left)[lroffset]), &((*right)[rroffset]), resultp, plus<double>());
+       lroffset+=leftStep;
+       rroffset+=rightStep;
+cerr << "left=" << lroffset << " right=" << rroffset << endl;
+    }
     break;
 // need to fill in the rest
     default:
-    throw DataException("Programmer error - resolveBinay can not resolve operator "+opToString(m_op)+".");
+    throw DataException("Programmer error - resolveBinary can not resolve operator "+opToString(m_op)+".");
   }
-cout << "About to return "  << result[0] << result[1] << result[2] << endl;;
-  return result;
+  roffset=offset;
+  return &v;
 }
 
695    
696    // #define PROC_OP(X) \
697    //  for (int i=0;i<steps;++i,resultp+=getNoValues()) \
698    //  { \
699    // cout << "Step#" << i << " chunk=" << chunksize << endl; \
700    // cout << left[0] << left[1] << left[2] << endl; \
701    // cout << right[0] << right[1] << right[2] << endl; \
702    //     tensor_binary_operation(chunksize, left, right, resultp, X); \
703    //     left+=leftStep; \
704    //     right+=rightStep; \
705    // cout << "Result=" << result << " " << result[0] << result[1] << result[2] << endl; \
706    //  }
707    //
708    // const double*
709    // DataLazy::resolveBinary(ValueType& v,int sampleNo,  size_t offset) const
710    // {
711    //  // again we assume that all collapsing has already been done
712    //  // so we have at least one expanded child.
713    //  // however, we could still have one of the children being not expanded.
714    //
715    // cout << "Resolve binary: " << toString() << endl;
716    //
717    //   const double* left=m_left->resolveSample(v,sampleNo,offset);
718    // // cout << "Done Left " << /*left[0] << left[1] << left[2] << */endl;
719    //   const double* right=m_right->resolveSample(v,sampleNo,offset);
720    // // cout << "Done Right"  << /*right[0] << right[1] << right[2] <<*/ endl;
721    //      // now we need to know which args are expanded
722    //   bool leftExp=(m_left->m_readytype=='E');
723    //   bool rightExp=(m_right->m_readytype=='E');
724    //   bool bigloops=((leftExp && rightExp) || (!leftExp && !rightExp));  // is processing in single step
725    //   int steps=(bigloops?1:getNumSamples());
726    //   size_t chunksize=(bigloops? m_samplesize : getNoValues());
727    //   int leftStep=((leftExp && !rightExp)? getNoValues() : 0);
728    //   int rightStep=((rightExp && !leftExp)? getNoValues() : 0);
729    // cout << "left=" << left << " right=" << right << endl;
730    //   double* result=&(v[offset]);
731    //   double* resultp=result;
732    //   switch(m_op)
733    //   {
734    //     case ADD:
735    //  for (int i=0;i<steps;++i,resultp+=getNoValues())
736    //  {
737    // cout << "Step#" << i << " chunk=" << chunksize << endl; \
738    // // cout << left[0] << left[1] << left[2] << endl;
739    // // cout << right[0] << right[1] << right[2] << endl;
740    //     tensor_binary_operation(chunksize, left, right, resultp, plus<double>());
741    // cout << "left=" << left << " right=" << right << " resp=" << resultp << endl;
742    //     left+=leftStep;
743    //     right+=rightStep;
744    // cout << "left=" << left << " right=" << right << endl;
745    // // cout << "Result=" << result << " " << result[0] << result[1] << result[2] << endl;
746    //  }
747    //  break;
748    // // need to fill in the rest
749    //     default:
750    //  throw DataException("Programmer error - resolveBinay can not resolve operator "+opToString(m_op)+".");
751    //   }
752    // // cout << "About to return "  << result[0] << result[1] << result[2] << endl;;
753    //   return result;
754    // }
755    
756    // // the vector and the offset are a place where the method could write its data if it wishes
757    // // it is not obligated to do so. For example, if it has its own storage already, it can use that.
758    // // Hence the return value to indicate where the data is actually stored.
759    // // Regardless, the storage should be assumed to be used, even if it isn't.
760    // const double*
761    // DataLazy::resolveSample(ValueType& v,int sampleNo,  size_t offset )
762    // {
763    // cout << "Resolve sample " << toString() << endl;
764    //  // collapse so we have a 'E' node or an IDENTITY for some other type
765    //   if (m_readytype!='E' && m_op!=IDENTITY)
766    //   {
767    //  collapse();
768    //   }
769    //   if (m_op==IDENTITY)    
770    //   {
771    //     const ValueType& vec=m_id->getVector();
772    //     if (m_readytype=='C')
773    //     {
774    //  return &(vec[0]);
775    //     }
776    //     return &(vec[m_id->getPointOffset(sampleNo, 0)]);
777    //   }
778    //   if (m_readytype!='E')
779    //   {
780    //     throw DataException("Programmer Error - Collapse did not produce an expanded node.");
781    //   }
782    //   switch (getOpgroup(m_op))
783    //   {
784    //   case G_UNARY: return resolveUnary(v,sampleNo,offset);
785    //   case G_BINARY: return resolveBinary(v,sampleNo,offset);
786    //   default:
787    //     throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");
788    //   }
789    // }
790    
791    
792    
 // the vector and the offset are a place where the method could write its data if it wishes
 // it is not obligated to do so. For example, if it has its own storage already, it can use that.
 // Hence the return value to indicate where the data is actually stored.
 // Regardless, the storage should be assumed to be used, even if it isn't.
-const double*
-DataLazy::resolveSample(ValueType& v,int sampleNo,  size_t offset )
+
+// the roffset is the offset within the returned vector where the data begins
+const DataTypes::ValueType*
+DataLazy::resolveSample(ValueType& v, size_t offset, int sampleNo, size_t& roffset)
 {
 cout << "Resolve sample " << toString() << endl;
    // collapse so we have a 'E' node or an IDENTITY for some other type
# Line 575 (v.1889) / Line 805 (v.1898)  cout << "Resolve sample " << toString()
   {
     collapse();
   }
-cout << "Post collapse check\n";
   if (m_op==IDENTITY)
   {
-cout << "In IDENTITY check\n";
     const ValueType& vec=m_id->getVector();
     if (m_readytype=='C')
     {
-    return &(vec[0]);
+    roffset=0;
+    return &(vec);
     }
-    return &(vec[m_id->getPointOffset(sampleNo, 0)]);
+    roffset=m_id->getPointOffset(sampleNo, 0);
+    return &(vec);
   }
   if (m_readytype!='E')
   {
     throw DataException("Programmer Error - Collapse did not produce an expanded node.");
   }
-cout << "Calling sub resolvers\n";
   switch (getOpgroup(m_op))
   {
-  case G_UNARY: return resolveUnary(v,sampleNo,offset);
-  case G_BINARY: return resolveBinary(v,sampleNo,offset);
+  case G_UNARY: return resolveUnary(v, offset,sampleNo,roffset);
+  case G_BINARY: return resolveBinary(v, offset,sampleNo,roffset);
   default:
     throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+".");
   }
 }
 
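Note: under the new protocol a caller can no longer assume the returned pointer addresses its own buffer at offset; the data may sit in v or in the node's own vector (the IDENTITY short-circuit), and roffset locates the sample either way. A hedged sketch of a consumer (sumSample is hypothetical; the typedef stands in for DataTypes::ValueType):

    #include <cstddef>
    #include <vector>

    typedef std::vector<double> ValueType;   // stand-in for DataTypes::ValueType

    // Toy consumer of the resolveSample protocol: always index the returned
    // vector through roffset, never assume the sample starts at index 0.
    double sumSample(const ValueType* res, std::size_t roffset, std::size_t samplesize)
    {
      double total = 0;
      for (std::size_t i = 0; i < samplesize; ++i)
      {
        total += (*res)[roffset + i];
      }
      return total;
    }
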
-// the vector and the offset are a place where the method could write its data if it wishes
-// it is not obligated to do so. For example, if it has its own storage already, it can use that.
-// Hence the return value to indicate where the data is actually stored.
-// Regardless, the storage should be assumed to be used, even if it isn't.
-const double*
-DataLazy::resolveSample2(ValueType& v,int sampleNo,  size_t offset )
-{
-  if (m_readytype!='E')
-  {
-    throw DataException("Only supporting Expanded Data.");
-  }
-  if (m_op==IDENTITY)
-  {
-    const ValueType& vec=m_id->getVector();
-    return &(vec[m_id->getPointOffset(sampleNo, 0)]);
-  }
-  size_t rightoffset=offset+m_samplesize;
-  const double* left=m_left->resolveSample(v,sampleNo,offset);
-  const double* right=0;
-  if (getOpgroup(m_op)==G_BINARY)
-  {
-    right=m_right->resolveSample(v,sampleNo,rightoffset);
-  }
-  double* result=&(v[offset]);
-  {
-    switch(m_op)
-    {
-    case ADD:       // since these are pointwise ops, pretend each sample is one point
-    tensor_binary_operation(m_samplesize, left, right, result, plus<double>());
-    break;
-    case SUB:
-    tensor_binary_operation(m_samplesize, left, right, result, minus<double>());
-    break;
-    case MUL:
-    tensor_binary_operation(m_samplesize, left, right, result, multiplies<double>());
-    break;
-    case DIV:
-    tensor_binary_operation(m_samplesize, left, right, result, divides<double>());
-    break;
-// unary ops
-    case SIN:
-    tensor_unary_operation(m_samplesize, left, result, ::sin);
-    break;
-    case COS:
-    tensor_unary_operation(m_samplesize, left, result, ::cos);
-    break;
-    case TAN:
-    tensor_unary_operation(m_samplesize, left, result, ::tan);
-    break;
-    case ASIN:
-    tensor_unary_operation(m_samplesize, left, result, ::asin);
-    break;
-    case ACOS:
-    tensor_unary_operation(m_samplesize, left, result, ::acos);
-    break;
-    case ATAN:
-    tensor_unary_operation(m_samplesize, left, result, ::atan);
-    break;
-    case SINH:
-    tensor_unary_operation(m_samplesize, left, result, ::sinh);
-    break;
-    case COSH:
-    tensor_unary_operation(m_samplesize, left, result, ::cosh);
-    break;
-    case TANH:
-    tensor_unary_operation(m_samplesize, left, result, ::tanh);
-    break;
-    case ERF:
-#ifdef _WIN32
-    throw DataException("Error - Data:: erf function is not supported on _WIN32 platforms.");
-#else
-    tensor_unary_operation(m_samplesize, left, result, ::erf);
-    break;
-#endif
-   case ASINH:
-#ifdef _WIN32
-    tensor_unary_operation(m_samplesize, left, result, escript::asinh_substitute);
-#else
-    tensor_unary_operation(m_samplesize, left, result, ::asinh);
-#endif
-    break;
-   case ACOSH:
-#ifdef _WIN32
-    tensor_unary_operation(m_samplesize, left, result, escript::acosh_substitute);
-#else
-    tensor_unary_operation(m_samplesize, left, result, ::acosh);
-#endif
-    break;
-   case ATANH:
-#ifdef _WIN32
-    tensor_unary_operation(m_samplesize, left, result, escript::atanh_substitute);
-#else
-    tensor_unary_operation(m_samplesize, left, result, ::atanh);
-#endif
-    break;
-    case LOG10:
-    tensor_unary_operation(m_samplesize, left, result, ::log10);
-    break;
-    case LOG:
-    tensor_unary_operation(m_samplesize, left, result, ::log);
-    break;
-    case SIGN:
-    tensor_unary_operation(m_samplesize, left, result, escript::fsign);
-    break;
-    case ABS:
-    tensor_unary_operation(m_samplesize, left, result, ::fabs);
-    break;
-    case NEG:
-    tensor_unary_operation(m_samplesize, left, result, negate<double>());
-    break;
-    case POS:
-    // it doesn't mean anything for delayed.
-    // it will just trigger a deep copy of the lazy object
-    throw DataException("Programmer error - POS not supported for lazy data.");
-    break;
-    case EXP:
-    tensor_unary_operation(m_samplesize, left, result, ::exp);
-    break;
-    case SQRT:
-    tensor_unary_operation(m_samplesize, left, result, ::sqrt);
-    break;
-    case RECIP:
-    tensor_unary_operation(m_samplesize, left, result, bind1st(divides<double>(),1.));
-    break;
-    case GZ:
-    tensor_unary_operation(m_samplesize, left, result, bind2nd(greater<double>(),0.0));
-    break;
-    case LZ:
-    tensor_unary_operation(m_samplesize, left, result, bind2nd(less<double>(),0.0));
-    break;
-    case GEZ:
-    tensor_unary_operation(m_samplesize, left, result, bind2nd(greater_equal<double>(),0.0));
-    break;
-    case LEZ:
-    tensor_unary_operation(m_samplesize, left, result, bind2nd(less_equal<double>(),0.0));
-    break;
-
-    default:
-    throw DataException("Programmer error - do not know how to resolve operator "+opToString(m_op)+".");
-    }
-  }
-  return result;
-}
+// This version uses double* trying again with vectors
+// DataReady_ptr
+// DataLazy::resolve()
+// {
+//
+// cout << "Sample size=" << m_samplesize << endl;
+// cout << "Buffers=" << m_buffsRequired << endl;
+//
+//   if (m_readytype!='E')
+//   {
+//     collapse();
+//   }
+//   if (m_op==IDENTITY)
+//   {
+//     return m_id;
+//   }
+//      // from this point on we must have m_op!=IDENTITY and m_readytype=='E'
+//   size_t threadbuffersize=m_samplesize*(max(1,m_buffsRequired)+1);
+//   int numthreads=1;
+// #ifdef _OPENMP
+//   numthreads=getNumberOfThreads();
+//   int threadnum=0;
+// #endif
+//   ValueType v(numthreads*threadbuffersize);
+// cout << "Buffer created with size=" << v.size() << endl;
+//   DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),  ValueType(getNoValues()));
+//   ValueType& resvec=result->getVector();
+//   DataReady_ptr resptr=DataReady_ptr(result);
+//   int sample;
+//   int resoffset;
+//   int totalsamples=getNumSamples();
+//   const double* res=0;
+//   #pragma omp parallel for private(sample,resoffset,threadnum,res) schedule(static)
+//   for (sample=0;sample<totalsamples;++sample)
+//   {
+// cout << "################################# " << sample << endl;
+// #ifdef _OPENMP
+//     res=resolveSample(v,sample,threadbuffersize*omp_get_thread_num());
+// #else
+//     res=resolveSample(v,sample,0);   // this would normally be v, but not if its a single IDENTITY op.
+// #endif
+// cerr << "-------------------------------- " << endl;
+//     resoffset=result->getPointOffset(sample,0);
+// cerr << "offset=" << resoffset << endl;
+//     for (unsigned int i=0;i<m_samplesize;++i,++resoffset)    // copy values into the output vector
+//     {
+//  resvec[resoffset]=res[i];
+//     }
+// cerr << "*********************************" << endl;
+//   }
+//   return resptr;
+// }
 
 
 DataReady_ptr
 DataLazy::resolve()
# Line 769 (v.1889) / Line 910 (v.1898)  cout << "Buffers=" << m_buffsRequired <<
 #endif
   ValueType v(numthreads*threadbuffersize);
 cout << "Buffer created with size=" << v.size() << endl;
-  ValueType dummy(getNoValues());
-  DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),dummy);
+  DataExpanded* result=new DataExpanded(getFunctionSpace(),getShape(),  ValueType(getNoValues()));
   ValueType& resvec=result->getVector();
   DataReady_ptr resptr=DataReady_ptr(result);
   int sample;
-  int resoffset;
+  size_t outoffset;     // offset in the output data
   int totalsamples=getNumSamples();
-  #pragma omp parallel for private(sample,resoffset,threadnum) schedule(static)
+  const ValueType* res=0;
+  size_t resoffset=0;
+  #pragma omp parallel for private(sample,resoffset,outoffset,threadnum,res) schedule(static)
   for (sample=0;sample<totalsamples;++sample)
   {
+cout << "################################# " << sample << endl;
 #ifdef _OPENMP
-    const double* res=resolveSample(v,sample,threadbuffersize*omp_get_thread_num());
+    res=resolveSample(v,threadbuffersize*omp_get_thread_num(),sample,resoffset);
 #else
-    const double* res=resolveSample(v,sample,0);   // this would normally be v, but not if its a single IDENTITY op.
+    res=resolveSample(v,0,sample,resoffset);   // this would normally be v, but not if its a single IDENTITY op.
 #endif
-    resoffset=result->getPointOffset(sample,0);
-    for (unsigned int i=0;i<m_samplesize;++i,++resoffset)   // copy values into the output vector
+cerr << "-------------------------------- " << endl;
+    outoffset=result->getPointOffset(sample,0);
+cerr << "offset=" << outoffset << endl;
+    for (unsigned int i=0;i<m_samplesize;++i,++outoffset,++resoffset)   // copy values into the output vector
     {
-    resvec[resoffset]=res[i];
+    resvec[outoffset]=(*res)[resoffset];
     }
+cerr << "*********************************" << endl;
   }
   return resptr;
 }
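Note: the scratch vector v is sized numthreads*threadbuffersize so that each OpenMP thread resolves samples into its own disjoint slice, with no locking needed. A self-contained toy (not escript code; sizes are illustrative, assuming OpenMP is available) of that layout:

    #include <omp.h>
    #include <cstddef>
    #include <vector>

    // Toy model of the buffer layout resolve() uses: one flat scratch vector
    // carved into per-thread regions so concurrent writes never overlap.
    int main()
    {
      const std::size_t threadbuffersize = 64;   // per-thread scratch, in doubles
      const int numthreads = omp_get_max_threads();
      std::vector<double> v(numthreads * threadbuffersize);

      #pragma omp parallel for
      for (int sample = 0; sample < 100; ++sample)
      {
        // each thread writes only inside [base, base + threadbuffersize)
        std::size_t base = threadbuffersize * omp_get_thread_num();
        v[base] = static_cast<double>(sample);   // stand-in for resolveSample(v, base, ...)
      }
      return 0;
    }
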

Legend:
-  removed from v.1889
+  added in v.1898
   (unprefixed lines are unchanged context)
