119 |
G_NP1OUT, // non-pointwise op with one output |
G_NP1OUT, // non-pointwise op with one output |
120 |
G_NP1OUT_P, // non-pointwise op with one output requiring a parameter |
G_NP1OUT_P, // non-pointwise op with one output requiring a parameter |
121 |
G_TENSORPROD, // general tensor product |
G_TENSORPROD, // general tensor product |
122 |
G_NP1OUT_2P // non-pointwise op with one output requiring two params |
G_NP1OUT_2P, // non-pointwise op with one output requiring two params |
123 |
|
G_REDUCTION // non-pointwise unary op with a scalar output |
124 |
}; |
}; |
125 |
|
|
126 |
|
|
135 |
"symmetric","nonsymmetric", |
"symmetric","nonsymmetric", |
136 |
"prod", |
"prod", |
137 |
"transpose", "trace", |
"transpose", "trace", |
138 |
"swapaxes"}; |
"swapaxes", |
139 |
int ES_opcount=41; |
"minval", "maxval"}; |
140 |
|
int ES_opcount=43; |
141 |
ES_opgroup opgroups[]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_BINARY,G_BINARY,G_BINARY, G_BINARY, |
ES_opgroup opgroups[]={G_UNKNOWN,G_IDENTITY,G_BINARY,G_BINARY,G_BINARY,G_BINARY, G_BINARY, |
142 |
G_UNARY,G_UNARY,G_UNARY, //10 |
G_UNARY,G_UNARY,G_UNARY, //10 |
143 |
G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY, // 17 |
G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY,G_UNARY, // 17 |
147 |
G_NP1OUT,G_NP1OUT, |
G_NP1OUT,G_NP1OUT, |
148 |
G_TENSORPROD, |
G_TENSORPROD, |
149 |
G_NP1OUT_P, G_NP1OUT_P, |
G_NP1OUT_P, G_NP1OUT_P, |
150 |
G_NP1OUT_2P}; |
G_NP1OUT_2P, |
151 |
|
G_REDUCTION, G_REDUCTION}; |
152 |
inline |
inline |
153 |
ES_opgroup |
ES_opgroup |
154 |
getOpgroup(ES_optype op) |
getOpgroup(ES_optype op) |
193 |
{ |
{ |
194 |
throw DataException("Shapes not the same - shapes must match for (point)binary operations."); |
throw DataException("Shapes not the same - shapes must match for (point)binary operations."); |
195 |
} |
} |
196 |
|
|
197 |
if (left->getRank()==0) // we need to allow scalar * anything |
if (left->getRank()==0) // we need to allow scalar * anything |
198 |
{ |
{ |
199 |
return right->getShape(); |
return right->getShape(); |
411 |
{ |
{ |
412 |
case G_IDENTITY: return 1; |
case G_IDENTITY: return 1; |
413 |
case G_BINARY: return 1+max(left->getBuffsRequired(),right->getBuffsRequired()+1); |
case G_BINARY: return 1+max(left->getBuffsRequired(),right->getBuffsRequired()+1); |
414 |
|
case G_REDUCTION: |
415 |
case G_UNARY: |
case G_UNARY: |
416 |
case G_UNARY_P: return max(left->getBuffsRequired(),1); |
case G_UNARY_P: return max(left->getBuffsRequired(),1); |
417 |
case G_NP1OUT: return 1+max(left->getBuffsRequired(),1); |
case G_NP1OUT: return 1+max(left->getBuffsRequired(),1); |
485 |
} |
} |
486 |
|
|
487 |
DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op) |
DataLazy::DataLazy(DataAbstract_ptr left, ES_optype op) |
488 |
: parent(left->getFunctionSpace(),left->getShape()), |
: parent(left->getFunctionSpace(),(getOpgroup(op)!=G_REDUCTION)?left->getShape():DataTypes::scalarShape), |
489 |
m_op(op), |
m_op(op), |
490 |
m_axis_offset(0), |
m_axis_offset(0), |
491 |
m_transpose(0), |
m_transpose(0), |
492 |
m_SL(0), m_SM(0), m_SR(0) |
m_SL(0), m_SM(0), m_SR(0) |
493 |
{ |
{ |
494 |
if ((getOpgroup(op)!=G_UNARY) && (getOpgroup(op)!=G_NP1OUT)) |
if ((getOpgroup(op)!=G_UNARY) && (getOpgroup(op)!=G_NP1OUT) && (getOpgroup(op)!=G_REDUCTION)) |
495 |
{ |
{ |
496 |
throw DataException("Programmer error - constructor DataLazy(left, op) will only process UNARY operations."); |
throw DataException("Programmer error - constructor DataLazy(left, op) will only process UNARY operations."); |
497 |
} |
} |
938 |
case SWAP: |
case SWAP: |
939 |
result=left.swapaxes(m_axis_offset, m_transpose); |
result=left.swapaxes(m_axis_offset, m_transpose); |
940 |
break; |
break; |
941 |
|
case MINVAL: |
942 |
|
result=left.minval(); |
943 |
|
break; |
944 |
|
case MAXVAL: |
945 |
|
result=left.maxval(); |
946 |
|
break; |
947 |
default: |
default: |
948 |
throw DataException("Programmer error - collapseToReady does not know how to resolve operator "+opToString(m_op)+"."); |
throw DataException("Programmer error - collapseToReady does not know how to resolve operator "+opToString(m_op)+"."); |
949 |
} |
} |
1110 |
} |
} |
1111 |
|
|
1112 |
|
|
1113 |
|
/* |
1114 |
|
\brief Compute the value of the expression (reduction operation) for the given sample. |
1115 |
|
\return Vector which stores the value of the subexpression for the given sample. |
1116 |
|
\param v A vector to store intermediate results. |
1117 |
|
\param offset Index in v to begin storing results. |
1118 |
|
\param sampleNo Sample number to evaluate. |
1119 |
|
\param roffset (output parameter) the offset in the return vector where the result begins. |
1120 |
|
|
1121 |
|
The return value will be an existing vector so do not deallocate it. |
1122 |
|
If the result is stored in v it should be stored at the offset given. |
1123 |
|
Everything from offset to the end of v should be considered available for this method to use. |
1124 |
|
*/ |
1125 |
|
DataTypes::ValueType* |
1126 |
|
DataLazy::resolveReduction(ValueType& v, size_t offset, int sampleNo, size_t& roffset) const |
1127 |
|
{ |
1128 |
|
// we assume that any collapsing has been done before we get here |
1129 |
|
// since we only have one argument we don't need to think about only |
1130 |
|
// processing single points. |
1131 |
|
if (m_readytype!='E') |
1132 |
|
{ |
1133 |
|
throw DataException("Programmer error - resolveUnary should only be called on expanded Data."); |
1134 |
|
} |
1135 |
|
const ValueType* vleft=m_left->resolveVectorSample(v,offset,sampleNo,roffset); |
1136 |
|
double* result=&(v[offset]); |
1137 |
|
roffset=offset; |
1138 |
|
switch (m_op) |
1139 |
|
{ |
1140 |
|
case MINVAL: |
1141 |
|
{ |
1142 |
|
FMin op; |
1143 |
|
*result=DataMaths::reductionOp(*vleft, m_left->getShape(), roffset, op, numeric_limits<double>::max()); |
1144 |
|
} |
1145 |
|
break; |
1146 |
|
case MAXVAL: |
1147 |
|
{ |
1148 |
|
FMax op; |
1149 |
|
*result=DataMaths::reductionOp(*vleft, m_left->getShape(), roffset, op, numeric_limits<double>::max()*-1); |
1150 |
|
} |
1151 |
|
break; |
1152 |
|
default: |
1153 |
|
throw DataException("Programmer error - resolveReduction can not resolve operator "+opToString(m_op)+"."); |
1154 |
|
} |
1155 |
|
return &v; |
1156 |
|
} |
1157 |
|
|
1158 |
|
|
1159 |
|
|
1682 |
case G_NP1OUT_P: return resolveNodeNP1OUT_P(tid, sampleNo, roffset); |
case G_NP1OUT_P: return resolveNodeNP1OUT_P(tid, sampleNo, roffset); |
1683 |
case G_TENSORPROD: return resolveNodeTProd(tid, sampleNo, roffset); |
case G_TENSORPROD: return resolveNodeTProd(tid, sampleNo, roffset); |
1684 |
case G_NP1OUT_2P: return resolveNodeNP1OUT_2P(tid, sampleNo, roffset); |
case G_NP1OUT_2P: return resolveNodeNP1OUT_2P(tid, sampleNo, roffset); |
1685 |
|
case G_REDUCTION: return resolveNodeReduction(tid, sampleNo, roffset); |
1686 |
default: |
default: |
1687 |
throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+"."); |
throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+"."); |
1688 |
} |
} |
1821 |
|
|
1822 |
|
|
1823 |
// Compute a scalar reduction (MINVAL/MAXVAL) of the left child for one sample.
// The scalar result is written into this node's m_samples buffer at the slot
// owned by thread tid; roffset (output) is set to that position.
// Returns a pointer to m_samples — callers must not deallocate it.
const DataTypes::ValueType*
DataLazy::resolveNodeReduction(int tid, int sampleNo, size_t& roffset)
{
	// we assume that any collapsing has been done before we get here
	// since we only have one argument we don't need to think about only
	// processing single points.
	// we will also know we won't get identity nodes
  if (m_readytype!='E')
  {
    // error message names this routine, not resolveUnary
    throw DataException("Programmer error - resolveNodeReduction should only be called on expanded Data.");
  }
  if (m_op==IDENTITY)
  {
    throw DataException("Programmer error - resolveNodeReduction should not be called on identity nodes.");
  }
  size_t loffset=0;
	// evaluate the operand; loffset locates its sample within *leftres
  const DataTypes::ValueType* leftres=m_left->resolveNodeSample(tid, sampleNo, loffset);

  roffset=m_samplesize*tid;	// per-thread slot in the shared sample buffer
  double* result=&(m_samples[roffset]);
  switch (m_op)
  {
    case MINVAL:
	{
	  FMin op;
	  // identity element for min is +max double
	  *result=DataMaths::reductionOp(*leftres, m_left->getShape(), loffset, op, numeric_limits<double>::max());
	}
	break;
    case MAXVAL:
	{
	  FMax op;
	  // identity element for max is -max double
	  *result=DataMaths::reductionOp(*leftres, m_left->getShape(), loffset, op, numeric_limits<double>::max()*-1);
	}
	break;
    default:
	throw DataException("Programmer error - resolveNodeReduction can not resolve operator "+opToString(m_op)+".");
  }
  return &(m_samples);
}
1862 |
|
|
1863 |
|
const DataTypes::ValueType* |
1864 |
DataLazy::resolveNodeNP1OUT(int tid, int sampleNo, size_t& roffset) |
DataLazy::resolveNodeNP1OUT(int tid, int sampleNo, size_t& roffset) |
1865 |
{ |
{ |
1866 |
// we assume that any collapsing has been done before we get here |
// we assume that any collapsing has been done before we get here |
2267 |
case G_NP1OUT_P: return resolveNP1OUT_P(v, offset, sampleNo,roffset); |
case G_NP1OUT_P: return resolveNP1OUT_P(v, offset, sampleNo,roffset); |
2268 |
case G_TENSORPROD: return resolveTProd(v,offset, sampleNo,roffset); |
case G_TENSORPROD: return resolveTProd(v,offset, sampleNo,roffset); |
2269 |
case G_NP1OUT_2P: return resolveNP1OUT_2P(v, offset, sampleNo, roffset); |
case G_NP1OUT_2P: return resolveNP1OUT_2P(v, offset, sampleNo, roffset); |
2270 |
|
case G_REDUCTION: return resolveReduction(v, offset, sampleNo, roffset); |
2271 |
default: |
default: |
2272 |
throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+"."); |
throw DataException("Programmer Error - resolveSample does not know how to process "+opToString(m_op)+"."); |
2273 |
} |
} |
2477 |
case G_UNARY_P: |
case G_UNARY_P: |
2478 |
case G_NP1OUT: |
case G_NP1OUT: |
2479 |
case G_NP1OUT_P: |
case G_NP1OUT_P: |
2480 |
|
case G_REDUCTION: |
2481 |
oss << opToString(m_op) << '('; |
oss << opToString(m_op) << '('; |
2482 |
m_left->intoString(oss); |
m_left->intoString(oss); |
2483 |
oss << ')'; |
oss << ')'; |
2506 |
switch (getOpgroup(m_op)) |
switch (getOpgroup(m_op)) |
2507 |
{ |
{ |
2508 |
case G_IDENTITY: return new DataLazy(m_id->deepCopy()->getPtr()); |
case G_IDENTITY: return new DataLazy(m_id->deepCopy()->getPtr()); |
2509 |
case G_UNARY: return new DataLazy(m_left->deepCopy()->getPtr(),m_op); |
case G_UNARY: |
2510 |
|
case G_REDUCTION: return new DataLazy(m_left->deepCopy()->getPtr(),m_op); |
2511 |
|
case G_UNARY_P: return new DataLazy(m_left->deepCopy()->getPtr(), m_op, m_tol); |
2512 |
case G_BINARY: return new DataLazy(m_left->deepCopy()->getPtr(),m_right->deepCopy()->getPtr(),m_op); |
case G_BINARY: return new DataLazy(m_left->deepCopy()->getPtr(),m_right->deepCopy()->getPtr(),m_op); |
2513 |
case G_NP1OUT: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(),m_op); |
case G_NP1OUT: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(),m_op); |
2514 |
case G_TENSORPROD: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(), m_op, m_axis_offset, m_transpose); |
case G_TENSORPROD: return new DataLazy(m_left->deepCopy()->getPtr(), m_right->deepCopy()->getPtr(), m_op, m_axis_offset, m_transpose); |
2515 |
|
case G_NP1OUT_P: return new DataLazy(m_left->deepCopy()->getPtr(),m_op, m_axis_offset); |
2516 |
|
case G_NP1OUT_2P: return new DataLazy(m_left->deepCopy()->getPtr(), m_op, m_axis_offset, m_transpose); |
2517 |
default: |
default: |
2518 |
throw DataException("Programmer error - do not know how to deepcopy operator "+opToString(m_op)+"."); |
throw DataException("Programmer error - do not know how to deepcopy operator "+opToString(m_op)+"."); |
2519 |
} |
} |
2520 |
} |
} |
2521 |
|
|
2522 |
|
|
2523 |
|
|
2524 |
// There is no single, natural interpretation of getLength on DataLazy. |
// There is no single, natural interpretation of getLength on DataLazy. |
2525 |
// Instances of DataReady can look at the size of their vectors. |
// Instances of DataReady can look at the size of their vectors. |
2526 |
// For lazy though, it could be the size the data would be if it were resolved; |
// For lazy though, it could be the size the data would be if it were resolved; |