Data.h
00001 
00002 /*******************************************************
00003 *
00004 * Copyright (c) 2003-2012 by University of Queensland
00005 * Earth Systems Science Computational Center (ESSCC)
00006 * http://www.uq.edu.au/esscc
00007 *
00008 * Primary Business: Queensland, Australia
00009 * Licensed under the Open Software License version 3.0
00010 * http://www.opensource.org/licenses/osl-3.0.php
00011 *
00012 *******************************************************/
00013 
00014 
00017 #ifndef DATA_H
00018 #define DATA_H
00019 #include "system_dep.h"
00020 
00021 #include "DataTypes.h"
00022 #include "DataAbstract.h"
00023 #include "DataAlgorithm.h"
00024 #include "FunctionSpace.h"
00025 #include "BinaryOp.h"
00026 #include "UnaryOp.h"
00027 #include "DataException.h"
00028 
00029 
00030 
00031 extern "C" {
00032 #include "DataC.h"
00033 //#include <omp.h>
00034 }
00035 
00036 #ifdef _OPENMP
00037 #include <omp.h>
00038 #endif
00039 
00040 #include "esysUtils/Esys_MPI.h"
00041 #include <string>
00042 #include <algorithm>
00043 #include <sstream>
00044 
00045 #include <boost/shared_ptr.hpp>
00046 #include <boost/python/object.hpp>
00047 #include <boost/python/tuple.hpp>
00048 
00049 namespace escript {
00050 
00051 //
00052 // Forward declarations for the various implementations of Data.
00053 class DataConstant;
00054 class DataTagged;
00055 class DataExpanded;
00056 class DataLazy;
00057 
00071 class Data {
00072 
00073   public:
00074 
00075   // These typedefs allow function names to be cast to pointers
00076   // to functions of the appropriate type when calling unaryOp etc.
00077   typedef double (*UnaryDFunPtr)(double);
00078   typedef double (*BinaryDFunPtr)(double,double);
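  // Illustrative sketch (not part of the original header): these typedefs let a
  // plain C math function be passed to the unary/binary helpers. A hypothetical
  // call site might look like
  //
  //   escript::Data d(1.0, DataTypes::ShapeType(), FunctionSpace());
  //   d.unaryOp2((UnaryDFunPtr)::tanh);   // the cast picks the double(double) overload
  //
  // The real call sites live in Data.cpp; this is only a sketch of the intent.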
00079 
00080 
00090   ESCRIPT_DLL_API
00091   Data();
00092 
00098   ESCRIPT_DLL_API
00099   Data(const Data& inData);
00100 
00107   ESCRIPT_DLL_API
00108   Data(const Data& inData,
00109        const FunctionSpace& what);
00110 
00115   ESCRIPT_DLL_API
00116   Data(const DataTypes::ValueType& value,
00117          const DataTypes::ShapeType& shape,
00118                  const FunctionSpace& what=FunctionSpace(),
00119                  bool expanded=false);
00120 
00132   ESCRIPT_DLL_API
00133   Data(double value,
00134        const DataTypes::ShapeType& dataPointShape=DataTypes::ShapeType(),
00135        const FunctionSpace& what=FunctionSpace(),
00136        bool expanded=false);
00137 
00145   ESCRIPT_DLL_API
00146   Data(const Data& inData,
00147        const DataTypes::RegionType& region);
00148 
00159   ESCRIPT_DLL_API
00160   Data(const boost::python::object& value,
00161        const FunctionSpace& what=FunctionSpace(),
00162        bool expanded=false);
00163 
00174   ESCRIPT_DLL_API     
00175   Data(const WrappedArray& w, const FunctionSpace& what,
00176            bool expanded=false);       
00177        
00178 
00188   ESCRIPT_DLL_API
00189   Data(const boost::python::object& value,
00190        const Data& other);
00191 
00196   ESCRIPT_DLL_API
00197   Data(double value,
00198        const boost::python::tuple& shape=boost::python::make_tuple(),
00199        const FunctionSpace& what=FunctionSpace(),
00200        bool expanded=false);
00201 
00206   ESCRIPT_DLL_API
00207   explicit Data(DataAbstract* underlyingdata);
00208 
00212   ESCRIPT_DLL_API
00213   explicit Data(DataAbstract_ptr underlyingdata);
00214 
00219   ESCRIPT_DLL_API
00220   ~Data();
00221 
00225   ESCRIPT_DLL_API
00226   void
00227   copy(const Data& other);
00228 
00232   ESCRIPT_DLL_API
00233   Data
00234   copySelf();
00235 
00236 
00240   ESCRIPT_DLL_API
00241   Data
00242   delay();
00243 
00247   ESCRIPT_DLL_API
00248   void 
00249   delaySelf();
00250 
00251 
00261   ESCRIPT_DLL_API
00262   void
00263   setProtection();
00264 
00270   ESCRIPT_DLL_API
00271   bool
00272   isProtected() const;
00273 
00274 
00279   ESCRIPT_DLL_API
00280   const boost::python::object
00281   getValueOfDataPointAsTuple(int dataPointNo);
00282 
00287   ESCRIPT_DLL_API
00288   void
00289   setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object);
00290 
00295   ESCRIPT_DLL_API
00296   void
00297   setValueOfDataPointToArray(int dataPointNo, const boost::python::object&);
00298 
00303   ESCRIPT_DLL_API
00304   void
00305   setValueOfDataPoint(int dataPointNo, const double);
00306 
00310   ESCRIPT_DLL_API
00311   const boost::python::object
00312   getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo);
00313 
00314   
00318   ESCRIPT_DLL_API
00319   void
00320   setTupleForGlobalDataPoint(int id, int proc, boost::python::object);
00321   
00327   ESCRIPT_DLL_API
00328   int
00329   getTagNumber(int dpno);
00330 
00335   ESCRIPT_DLL_API
00336   escriptDataC
00337   getDataC();
00338 
00339 
00340 
00345   ESCRIPT_DLL_API
00346   escriptDataC
00347   getDataC() const;
00348 
00349 
00354   ESCRIPT_DLL_API
00355   std::string
00356   toString() const;
00357 
00362   ESCRIPT_DLL_API
00363   void
00364   expand();
00365 
00372   ESCRIPT_DLL_API
00373   void
00374   tag();
00375 
00380   ESCRIPT_DLL_API
00381   void
00382   resolve();
00383 
00384 
00392   ESCRIPT_DLL_API
00393   void
00394   requireWrite();
00395 
00401   ESCRIPT_DLL_API
00402   bool
00403   isExpanded() const;
00404 
00410   ESCRIPT_DLL_API
00411   bool
00412   actsExpanded() const;
00413   
00414 
00419   ESCRIPT_DLL_API
00420   bool
00421   isTagged() const;
00422 
00427   ESCRIPT_DLL_API
00428   bool
00429   isConstant() const;
00430 
00434   ESCRIPT_DLL_API
00435   bool
00436   isLazy() const;
00437 
00441   ESCRIPT_DLL_API
00442   bool
00443   isReady() const;
00444 
00450   ESCRIPT_DLL_API
00451   bool
00452   isEmpty() const;
00453 
00458   ESCRIPT_DLL_API
00459   inline
00460   const FunctionSpace&
00461   getFunctionSpace() const
00462   {
00463     return m_data->getFunctionSpace();
00464   }
00465 
00470   ESCRIPT_DLL_API
00471   const FunctionSpace
00472   getCopyOfFunctionSpace() const;
00473 
00478   ESCRIPT_DLL_API
00479   inline
00480 //   const AbstractDomain&
00481   const_Domain_ptr
00482   getDomain() const
00483   {
00484      return getFunctionSpace().getDomain();
00485   }
00486 
00487 
00493   ESCRIPT_DLL_API
00494   inline
00495 //   const AbstractDomain&
00496   Domain_ptr
00497   getDomainPython() const
00498   {
00499      return getFunctionSpace().getDomainPython();
00500   }
00501 
00506   ESCRIPT_DLL_API
00507   const AbstractDomain
00508   getCopyOfDomain() const;
00509 
00514   ESCRIPT_DLL_API
00515   inline
00516   unsigned int
00517   getDataPointRank() const
00518   {
00519     return m_data->getRank();
00520   }
00521 
00526   ESCRIPT_DLL_API
00527   inline
00528   int
00529   getNumDataPoints() const
00530   {
00531     return getNumSamples() * getNumDataPointsPerSample();
00532   }
00537   ESCRIPT_DLL_API
00538   inline
00539   int
00540   getNumSamples() const
00541   {
00542     return m_data->getNumSamples();
00543   }
00544 
00549   ESCRIPT_DLL_API
00550   inline
00551   int
00552   getNumDataPointsPerSample() const
00553   {
00554     return m_data->getNumDPPSample();
00555   }
00556 
00557 
00562   ESCRIPT_DLL_API
00563   int
00564   getNoValues() const
00565   {
00566     return m_data->getNoValues();
00567   }
00568 
00569 
00574   ESCRIPT_DLL_API
00575   void
00576   dump(const std::string fileName) const;
00577 
00584   ESCRIPT_DLL_API
00585   const boost::python::object
00586   toListOfTuples(bool scalarastuple=true);
00587 
00588 
00596   ESCRIPT_DLL_API
00597   inline
00598   const DataAbstract::ValueType::value_type*
00599   getSampleDataRO(DataAbstract::ValueType::size_type sampleNo);
00600 
00601 
00609   ESCRIPT_DLL_API
00610   inline
00611   DataAbstract::ValueType::value_type*
00612   getSampleDataRW(DataAbstract::ValueType::size_type sampleNo);
00613 
00614 
00621   ESCRIPT_DLL_API
00622   inline
00623   DataAbstract::ValueType::value_type*
00624   getSampleDataByTag(int tag)
00625   {
00626     return m_data->getSampleDataByTag(tag);
00627   }
00628 
00635   ESCRIPT_DLL_API
00636   DataTypes::ValueType::const_reference
00637   getDataPointRO(int sampleNo, int dataPointNo);
00638 
00645   ESCRIPT_DLL_API
00646   DataTypes::ValueType::reference
00647   getDataPointRW(int sampleNo, int dataPointNo);
00648 
00649 
00650 
00655   ESCRIPT_DLL_API
00656   inline
00657   DataTypes::ValueType::size_type
00658   getDataOffset(int sampleNo,
00659                int dataPointNo)
00660   {
00661       return m_data->getPointOffset(sampleNo,dataPointNo);
00662   }
00663 
00668   ESCRIPT_DLL_API
00669   inline
00670   const DataTypes::ShapeType&
00671   getDataPointShape() const
00672   {
00673     return m_data->getShape();
00674   }
00675 
00680   ESCRIPT_DLL_API
00681   const boost::python::tuple
00682   getShapeTuple() const;
00683 
00689   ESCRIPT_DLL_API
00690   int
00691   getDataPointSize() const;
00692 
00697   ESCRIPT_DLL_API
00698   DataTypes::ValueType::size_type
00699   getLength() const;
00700 
00705   ESCRIPT_DLL_API
00706   bool
00707   hasNoSamples() const
00708   {
00709     return getLength()==0;
00710   }
00711 
00720   ESCRIPT_DLL_API
00721   void
00722   setTaggedValueByName(std::string name,
00723                        const boost::python::object& value);
00724 
00734   ESCRIPT_DLL_API
00735   void
00736   setTaggedValue(int tagKey,
00737                  const boost::python::object& value);
00738 
00749   ESCRIPT_DLL_API
00750   void
00751   setTaggedValueFromCPP(int tagKey,
00752             const DataTypes::ShapeType& pointshape,
00753                         const DataTypes::ValueType& value,
00754             int dataOffset=0);
00755 
00756 
00757 
00762   ESCRIPT_DLL_API
00763   void
00764   copyWithMask(const Data& other,
00765                const Data& mask);
00766 
00776   ESCRIPT_DLL_API
00777   void
00778   setToZero();
00779 
00786   ESCRIPT_DLL_API
00787   Data
00788   interpolate(const FunctionSpace& functionspace) const;
00789 
00790   ESCRIPT_DLL_API
00791   Data
00792   interpolateFromTable3D(const WrappedArray& table, double Amin, double Astep,
00793                        double undef, Data& B, double Bmin, double Bstep, Data& C, 
00794             double Cmin, double Cstep, bool check_boundaries);
00795 
00796   ESCRIPT_DLL_API
00797   Data
00798   interpolateFromTable2D(const WrappedArray& table, double Amin, double Astep,
00799                        double undef, Data& B, double Bmin, double Bstep,bool check_boundaries);
00800 
00801   ESCRIPT_DLL_API
00802   Data
00803   interpolateFromTable1D(const WrappedArray& table, double Amin, double Astep,
00804                        double undef,bool check_boundaries);
00805 
00806 
00807   ESCRIPT_DLL_API
00808   Data
00809   interpolateFromTable3DP(boost::python::object table, double Amin, double Astep,
00810                         Data& B, double Bmin, double Bstep, Data& C, double Cmin, double Cstep, double undef,bool check_boundaries);
00811 
00812 
00813   ESCRIPT_DLL_API
00814   Data
00815   interpolateFromTable2DP(boost::python::object table, double Amin, double Astep,
00816                         Data& B, double Bmin, double Bstep, double undef,bool check_boundaries);
00817 
00818   ESCRIPT_DLL_API
00819   Data
00820   interpolateFromTable1DP(boost::python::object table, double Amin, double Astep,
00821                         double undef,bool check_boundaries);
00822 
00829   ESCRIPT_DLL_API
00830   Data
00831   gradOn(const FunctionSpace& functionspace) const;
00832 
00833   ESCRIPT_DLL_API
00834   Data
00835   grad() const;
00836 
00841   ESCRIPT_DLL_API
00842   boost::python::object
00843   integrateToTuple_const() const;
00844 
00845 
00850   ESCRIPT_DLL_API
00851   boost::python::object
00852   integrateToTuple();
00853 
00854 
00855 
00861   ESCRIPT_DLL_API
00862   Data
00863   oneOver() const;
00869   ESCRIPT_DLL_API
00870   Data
00871   wherePositive() const;
00872 
00878   ESCRIPT_DLL_API
00879   Data
00880   whereNegative() const;
00881 
00887   ESCRIPT_DLL_API
00888   Data
00889   whereNonNegative() const;
00890 
00896   ESCRIPT_DLL_API
00897   Data
00898   whereNonPositive() const;
00899 
00905   ESCRIPT_DLL_API
00906   Data
00907   whereZero(double tol=0.0) const;
00908 
00914   ESCRIPT_DLL_API
00915   Data
00916   whereNonZero(double tol=0.0) const;
00917 
00929   ESCRIPT_DLL_API
00930   double
00931   Lsup();
00932 
00933   ESCRIPT_DLL_API
00934   double
00935   Lsup_const() const;
00936 
00937 
00949   ESCRIPT_DLL_API
00950   double
00951   sup();
00952 
00953   ESCRIPT_DLL_API
00954   double
00955   sup_const() const;
00956 
00957 
00969   ESCRIPT_DLL_API
00970   double
00971   inf();
00972 
00973   ESCRIPT_DLL_API
00974   double
00975   inf_const() const;
00976 
00977 
00978 
00984   ESCRIPT_DLL_API
00985   Data
00986   abs() const;
00987 
00993   ESCRIPT_DLL_API
00994   Data
00995   maxval() const;
00996 
01002   ESCRIPT_DLL_API
01003   Data
01004   minval() const;
01005 
01013   ESCRIPT_DLL_API
01014   const boost::python::tuple
01015   minGlobalDataPoint() const;
01016 
01024   ESCRIPT_DLL_API
01025   const boost::python::tuple
01026   maxGlobalDataPoint() const;
01027 
01028 
01029 
01036   ESCRIPT_DLL_API
01037   Data
01038   sign() const;
01039 
01045   ESCRIPT_DLL_API
01046   Data
01047   symmetric() const;
01048 
01054   ESCRIPT_DLL_API
01055   Data
01056   nonsymmetric() const;
01057 
01063   ESCRIPT_DLL_API
01064   Data
01065   trace(int axis_offset) const;
01066 
01072   ESCRIPT_DLL_API
01073   Data
01074   transpose(int axis_offset) const;
01075 
01082   ESCRIPT_DLL_API
01083   Data
01084   eigenvalues() const;
01085 
01095   ESCRIPT_DLL_API
01096   const boost::python::tuple
01097   eigenvalues_and_eigenvectors(const double tol=1.e-12) const;
01098 
01104   ESCRIPT_DLL_API
01105   Data
01106   swapaxes(const int axis0, const int axis1) const;
01107 
01113   ESCRIPT_DLL_API
01114   Data
01115   erf() const;
01116 
01122   ESCRIPT_DLL_API
01123   Data
01124   sin() const;
01125 
01131   ESCRIPT_DLL_API
01132   Data
01133   cos() const;
01134 
01140   ESCRIPT_DLL_API
01141   Data
01142   tan() const;
01143 
01149   ESCRIPT_DLL_API
01150   Data
01151   asin() const;
01152 
01158   ESCRIPT_DLL_API
01159   Data
01160   acos() const;
01161 
01167   ESCRIPT_DLL_API
01168   Data
01169   atan() const;
01170 
01176   ESCRIPT_DLL_API
01177   Data
01178   sinh() const;
01179 
01185   ESCRIPT_DLL_API
01186   Data
01187   cosh() const;
01188 
01194   ESCRIPT_DLL_API
01195   Data
01196   tanh() const;
01197 
01203   ESCRIPT_DLL_API
01204   Data
01205   asinh() const;
01206 
01212   ESCRIPT_DLL_API
01213   Data
01214   acosh() const;
01215 
01221   ESCRIPT_DLL_API
01222   Data
01223   atanh() const;
01224 
01230   ESCRIPT_DLL_API
01231   Data
01232   log10() const;
01233 
01239   ESCRIPT_DLL_API
01240   Data
01241   log() const;
01242 
01248   ESCRIPT_DLL_API
01249   Data
01250   exp() const;
01251 
01257   ESCRIPT_DLL_API
01258   Data
01259   sqrt() const;
01260 
01266   ESCRIPT_DLL_API
01267   Data
01268   neg() const;
01269 
01276   ESCRIPT_DLL_API
01277   Data
01278   pos() const;
01279 
01287   ESCRIPT_DLL_API
01288   Data
01289   powD(const Data& right) const;
01290 
01298   ESCRIPT_DLL_API
01299   Data
01300   powO(const boost::python::object& right) const;
01301 
01310   ESCRIPT_DLL_API
01311   Data
01312   rpowO(const boost::python::object& left) const;
01313 
01318   ESCRIPT_DLL_API
01319   void
01320   saveDX(std::string fileName) const;
01321 
01326   ESCRIPT_DLL_API
01327   void
01328   saveVTK(std::string fileName) const;
01329 
01330 
01331 
01338   ESCRIPT_DLL_API
01339   Data& operator+=(const Data& right);
01340   ESCRIPT_DLL_API
01341   Data& operator+=(const boost::python::object& right);
01342 
01343   ESCRIPT_DLL_API
01344   Data& operator=(const Data& other);
01345 
01352   ESCRIPT_DLL_API
01353   Data& operator-=(const Data& right);
01354   ESCRIPT_DLL_API
01355   Data& operator-=(const boost::python::object& right);
01356 
01363   ESCRIPT_DLL_API
01364   Data& operator*=(const Data& right);
01365   ESCRIPT_DLL_API
01366   Data& operator*=(const boost::python::object& right);
01367 
01374   ESCRIPT_DLL_API
01375   Data& operator/=(const Data& right);
01376   ESCRIPT_DLL_API
01377   Data& operator/=(const boost::python::object& right);
01378 
01383   ESCRIPT_DLL_API
01384   Data truedivD(const Data& right);
01385 
01390   ESCRIPT_DLL_API
01391   Data truedivO(const boost::python::object& right);
01392 
01397   ESCRIPT_DLL_API
01398   Data rtruedivO(const boost::python::object& left);
01399 
01404   ESCRIPT_DLL_API
01405   boost::python::object __add__(const boost::python::object& right);
01406   
01407 
01412   ESCRIPT_DLL_API
01413   boost::python::object __sub__(const boost::python::object& right);
01414   
01419   ESCRIPT_DLL_API
01420   boost::python::object __rsub__(const boost::python::object& right);  
01421 
01426   ESCRIPT_DLL_API
01427   boost::python::object __mul__(const boost::python::object& right);
01428     
01433   ESCRIPT_DLL_API
01434   boost::python::object __div__(const boost::python::object& right);
01435   
01440   ESCRIPT_DLL_API
01441   boost::python::object __rdiv__(const boost::python::object& right);    
01442   
01446   ESCRIPT_DLL_API
01447   Data
01448   matrixInverse() const;
01449 
01454   ESCRIPT_DLL_API
01455   bool
01456   probeInterpolation(const FunctionSpace& functionspace) const;
01457 
01473   ESCRIPT_DLL_API
01474   Data
01475   getItem(const boost::python::object& key) const;
01476 
01488   ESCRIPT_DLL_API
01489   void
01490   setItemD(const boost::python::object& key,
01491            const Data& value);
01492 
01493   ESCRIPT_DLL_API
01494   void
01495   setItemO(const boost::python::object& key,
01496            const boost::python::object& value);
01497 
01498   // The following public methods should be treated as private.
01499 
01505   template <class UnaryFunction>
01506   ESCRIPT_DLL_API
01507   inline
01508   void
01509   unaryOp2(UnaryFunction operation);
01510 
01518   ESCRIPT_DLL_API
01519   Data
01520   getSlice(const DataTypes::RegionType& region) const;
01521 
01530   ESCRIPT_DLL_API
01531   void
01532   setSlice(const Data& value,
01533            const DataTypes::RegionType& region);
01534 
01539   ESCRIPT_DLL_API
01540   void
01541         print(void);
01542 
01549   ESCRIPT_DLL_API
01550         int
01551         get_MPIRank(void) const;
01552 
01559   ESCRIPT_DLL_API
01560         int
01561         get_MPISize(void) const;
01562 
01568   ESCRIPT_DLL_API
01569         MPI_Comm
01570         get_MPIComm(void) const;
01571 
01577   ESCRIPT_DLL_API
01578         DataAbstract*
01579         borrowData(void) const;
01580 
01581   ESCRIPT_DLL_API
01582         DataAbstract_ptr
01583         borrowDataPtr(void) const;
01584 
01585   ESCRIPT_DLL_API
01586         DataReady_ptr
01587         borrowReadyPtr(void) const;
01588 
01589 
01590 
01598   ESCRIPT_DLL_API
01599         DataTypes::ValueType::const_reference
01600         getDataAtOffsetRO(DataTypes::ValueType::size_type i);
01601 
01602 
01603   ESCRIPT_DLL_API
01604         DataTypes::ValueType::reference
01605         getDataAtOffsetRW(DataTypes::ValueType::size_type i);
01606 
01607  
01608  protected:
01609 
01610  private:
01611 
01612 template <class BinaryOp>
01613   double 
01614 #ifdef ESYS_MPI
01615   lazyAlgWorker(double init, MPI_Op mpiop_type);
01616 #else
01617   lazyAlgWorker(double init);
01618 #endif
01619 
01620   double
01621   LsupWorker() const;
01622 
01623   double
01624   supWorker() const;
01625 
01626   double
01627   infWorker() const;
01628 
01629   boost::python::object
01630   integrateWorker() const;
01631 
01632   void
01633   calc_minGlobalDataPoint(int& ProcNo,  int& DataPointNo) const;
01634 
01635   void
01636   calc_maxGlobalDataPoint(int& ProcNo,  int& DataPointNo) const;
01637 
01638   // For internal use in Data.cpp only!
01639   // Other uses should call the main entry points and allow laziness.
01640   Data
01641   minval_nonlazy() const;
01642 
01643   // For internal use in Data.cpp only!
01644   Data
01645   maxval_nonlazy() const;
01646 
01647 
01654   inline
01655   void
01656   operandCheck(const Data& right) const
01657   {
01658     return m_data->operandCheck(*(right.m_data.get()));
01659   }
01660 
01666   template <class BinaryFunction>
01667   inline
01668   double
01669   algorithm(BinaryFunction operation,
01670             double initial_value) const;
01671 
01679   template <class BinaryFunction>
01680   inline
01681   Data
01682   dp_algorithm(BinaryFunction operation,
01683                double initial_value) const;
01684 
01693   template <class BinaryFunction>
01694   inline
01695   void
01696   binaryOp(const Data& right,
01697            BinaryFunction operation);
01698 
01704   void
01705   typeMatchLeft(Data& right) const;
01706 
01712   void
01713   typeMatchRight(const Data& right);
01714 
01720   void
01721   initialise(const DataTypes::ValueType& value,
01722          const DataTypes::ShapeType& shape,
01723              const FunctionSpace& what,
01724              bool expanded);
01725 
01726   void
01727   initialise(const WrappedArray& value,
01728                  const FunctionSpace& what,
01729                  bool expanded);
01730 
01731   void
01732   initialise(const double value,
01733          const DataTypes::ShapeType& shape,
01734              const FunctionSpace& what,
01735              bool expanded);
01736 
01737   //
01738   // flag to protect the data object against any update
01739   bool m_protected;
01740   mutable bool m_shared;
01741   bool m_lazy;
01742 
01743   //
01744   // pointer to the actual data object
01745 //   boost::shared_ptr<DataAbstract> m_data;
01746   DataAbstract_ptr m_data;
01747 
01748 // If possible please use getReadyPtr instead.
01749 // But see warning below.
01750   const DataReady*
01751   getReady() const;
01752 
01753   DataReady*
01754   getReady();
01755 
01756 
01757 // Be wary of using this for local operations since it (temporarily) increases the reference count.
01758 // If you are just using this to call a method on DataReady instead of DataAbstract, consider using
01759 // getReady() instead.
01760   DataReady_ptr
01761   getReadyPtr();
01762 
01763   const_DataReady_ptr
01764   getReadyPtr() const;
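  // Sketch only (not in the original header) of the trade-off warned about above:
  //
  //   const DataReady*    r = getReady();     // raw pointer, no reference-count change
  //   const_DataReady_ptr p = getReadyPtr();  // shared pointer, bumps the use count
  //
  // so for a quick local call prefer getReady(); reach for getReadyPtr() only when
  // the pointer must be kept alive independently of this Data object.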
01765 
01766 
01772   void updateShareStatus(bool nowshared) const
01773   {
01774     m_shared=nowshared;     // m_shared is mutable
01775   }
01776 
01777   // In the isShared() method below:
01778   // A problem would occur if m_data (the address pointed to) were being modified
01779   // while the call m_data->isShared() is being executed.
01780   //
01781   // Q: So why do I think this code can be thread safe/correct?
01782   // A: We need to make some assumptions.
01783   // 1. We assume it is acceptable to return true under some conditions when we aren't shared.
01784   // 2. We assume that no constructions or assignments which would share previously unshared data
01785   //    will occur while this call is executing. This is consistent with the way the Data class and the C interface are written.
01786   //
01787   // This means that the only transition we need to consider is when a previously shared object is
01788   // not shared anymore, i.e. the other objects have been destroyed or a deep copy has been made.
01789   // In those cases the m_shared flag changes to false after m_data has completed changing.
01790   // Any threads executing before the flag switches will assume the object is still shared.
01791   bool isShared() const
01792   {
01793     return m_shared;
01794 /*  if (m_shared) return true;
01795     if (m_data->isShared())         
01796     {                   
01797         updateShareStatus(true);
01798         return true;
01799     }
01800     return false;*/
01801   }
01802 
01803   void forceResolve()
01804   {
01805     if (isLazy())
01806     {
01807         #ifdef _OPENMP
01808         if (omp_in_parallel())
01809         {   // Yes, this throws an exception out of an OpenMP thread, which is forbidden.
01810         throw DataException("Please do not call forceResolve() in a parallel region.");
01811         }
01812         #endif
01813         resolve();
01814     }
01815   }
01816 
01821   void exclusiveWrite()
01822   {
01823 #ifdef _OPENMP
01824   if (omp_in_parallel())
01825   {
01826 // *((int*)0)=17;
01827     throw DataException("Programming error. Please do not run exclusiveWrite() in multi-threaded sections.");
01828   }
01829 #endif
01830     forceResolve();
01831     if (isShared())
01832     {
01833         DataAbstract* t=m_data->deepCopy();
01834         set_m_data(DataAbstract_ptr(t));
01835     }
01836   }
01837 
01841   void checkExclusiveWrite()
01842   {
01843     if  (isLazy() || isShared())
01844     {
01845         throw DataException("Programming error. ExclusiveWrite required - please call requireWrite()");
01846     }
01847   }
01848 
01855   void set_m_data(DataAbstract_ptr p);
01856 
01857   friend class DataAbstract;        // To allow calls to updateShareStatus
01858   friend class TestDomain;      // so its getX will work quickly
01859 #ifdef IKNOWWHATIMDOING
01860   friend ESCRIPT_DLL_API Data applyBinaryCFunction(boost::python::object cfunc, boost::python::tuple shape, escript::Data& d, escript::Data& e);
01861 #endif
01862   friend ESCRIPT_DLL_API Data condEval(escript::Data& mask, escript::Data& trueval, escript::Data& falseval);
01863   friend ESCRIPT_DLL_API Data randomData(const boost::python::tuple& shape, const FunctionSpace& what, long seed);
01864 
01865 };
01866 
01867 
01868 #ifdef IKNOWWHATIMDOING
01869 ESCRIPT_DLL_API
01870 Data
01871 applyBinaryCFunction(boost::python::object func, boost::python::tuple shape, escript::Data& d, escript::Data& e);
01872 #endif
01873 
01874 
01875 ESCRIPT_DLL_API
01876 Data
01877 condEval(escript::Data& mask, escript::Data& trueval, escript::Data& falseval);
01878 
01879 
01880 
01884 ESCRIPT_DLL_API
01885 Data randomData(const boost::python::tuple& shape,
01886        const FunctionSpace& what,
01887        long seed);
01888 
01889 
01890 }   // end namespace escript
01891 
01892 
01893 // No, this is not supposed to be at the top of the file
01894 // DataAbstract needs to be declared first, then DataReady needs to be fully declared
01895 // so that I can dynamic_cast between them below.
01896 #include "DataReady.h"
01897 #include "DataLazy.h"
01898 
01899 namespace escript
01900 {
01901 
01902 inline
01903 const DataReady*
01904 Data::getReady() const
01905 {
01906    const DataReady* dr=dynamic_cast<const DataReady*>(m_data.get());
01907    EsysAssert((dr!=0), "Error - casting to DataReady.");
01908    return dr;
01909 }
01910 
01911 inline
01912 DataReady*
01913 Data::getReady()
01914 {
01915    DataReady* dr=dynamic_cast<DataReady*>(m_data.get());
01916    EsysAssert((dr!=0), "Error - casting to DataReady.");
01917    return dr;
01918 }
01919 
01920 // Be wary of using this for local operations since it (temporarily) increases the reference count.
01921 // If you are just using this to call a method on DataReady instead of DataAbstract, consider using
01922 // getReady() instead.
01923 inline
01924 DataReady_ptr
01925 Data::getReadyPtr()
01926 {
01927    DataReady_ptr dr=boost::dynamic_pointer_cast<DataReady>(m_data);
01928    EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
01929    return dr;
01930 }
01931 
01932 
01933 inline
01934 const_DataReady_ptr
01935 Data::getReadyPtr() const
01936 {
01937    const_DataReady_ptr dr=boost::dynamic_pointer_cast<const DataReady>(m_data);
01938    EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
01939    return dr;
01940 }
01941 
01942 inline
01943 DataAbstract::ValueType::value_type*
01944 Data::getSampleDataRW(DataAbstract::ValueType::size_type sampleNo)
01945 {
01946    if (isLazy())
01947    {
01948     throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
01949    }
01950    return getReady()->getSampleDataRW(sampleNo);
01951 }
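// Sketch (not part of the header): the write-access pattern implied by the
// exception above is, roughly,
//
//   d.requireWrite();                                              // resolve laziness, get exclusive ownership
//   DataAbstract::ValueType::value_type* p = d.getSampleDataRW(0); // now safe to modify sample 0 in place
//
// calling getSampleDataRW() on lazy data without requireWrite() throws.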
01952 
01953 inline
01954 const DataAbstract::ValueType::value_type*
01955 Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo)
01956 {
01957    DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
01958    if (l!=0)
01959    {
01960     size_t offset=0;
01961     const DataTypes::ValueType* res=l->resolveSample(sampleNo,offset);
01962     return &((*res)[offset]);
01963    }
01964    return getReady()->getSampleDataRO(sampleNo);
01965 }
01966 
01967 
01968 
01972 char *Escript_MPI_appendRankToFileName(const char *, int, int);
01973 
01977 inline double rpow(double x,double y)
01978 {
01979     return pow(y,x);
01980 }
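// Worked example: rpow swaps its arguments before calling pow, so
//   rpow(3.0, 2.0) == pow(2.0, 3.0) == 8.0
// presumably so that a reflected power operation (cf. rpowO above) can reuse
// pow-style machinery with the operands in the order the caller supplies them.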
01981 
01987 ESCRIPT_DLL_API Data operator+(const Data& left, const Data& right);
01988 
01994 ESCRIPT_DLL_API Data operator-(const Data& left, const Data& right);
01995 
02001 ESCRIPT_DLL_API Data operator*(const Data& left, const Data& right);
02002 
02008 ESCRIPT_DLL_API Data operator/(const Data& left, const Data& right);
02009 
02016 ESCRIPT_DLL_API Data operator+(const Data& left, const boost::python::object& right);
02017 
02024 ESCRIPT_DLL_API Data operator-(const Data& left, const boost::python::object& right);
02025 
02032 ESCRIPT_DLL_API Data operator*(const Data& left, const boost::python::object& right);
02033 
02040 ESCRIPT_DLL_API Data operator/(const Data& left, const boost::python::object& right);
02041 
02048 ESCRIPT_DLL_API Data operator+(const boost::python::object& left, const Data& right);
02049 
02056 ESCRIPT_DLL_API Data operator-(const boost::python::object& left, const Data& right);
02057 
02064 ESCRIPT_DLL_API Data operator*(const boost::python::object& left, const Data& right);
02065 
02072 ESCRIPT_DLL_API Data operator/(const boost::python::object& left, const Data& right);
02073 
02074 
02075 
02080 ESCRIPT_DLL_API std::ostream& operator<<(std::ostream& o, const Data& data);
02081 
02090 ESCRIPT_DLL_API
02091 Data
02092 C_GeneralTensorProduct(Data& arg_0,
02093                      Data& arg_1,
02094                      int axis_offset=0,
02095                      int transpose=0);
02096 
02102 inline
02103 Data
02104 Data::truedivD(const Data& right)
02105 {
02106     return *this / right;
02107 }
02108 
02114 inline
02115 Data
02116 Data::truedivO(const boost::python::object& right)
02117 {
02118     Data tmp(right, getFunctionSpace(), false);
02119     return truedivD(tmp);
02120 }
02121 
02127 inline
02128 Data
02129 Data::rtruedivO(const boost::python::object& left)
02130 {
02131     Data tmp(left, getFunctionSpace(), false);
02132     return tmp.truedivD(*this);
02133 }
02134 
02140 template <class BinaryFunction>
02141 inline
02142 void
02143 Data::binaryOp(const Data& right,
02144                BinaryFunction operation)
02145 {
02146    //
02147    // a rank-zero object cannot be updated with an object of higher rank (see the exception below)
02148    if (getDataPointRank()==0 && right.getDataPointRank()!=0) {
02149      throw DataException("Error - attempt to update rank zero object with object with rank bigger than zero.");
02150    }
02151 
02152    if (isLazy() || right.isLazy())
02153    {
02154      throw DataException("Programmer error - attempt to call binaryOp with Lazy Data.");
02155    }
02156    //
02157    // initially make the temporary a shallow copy
02158    Data tempRight(right);
02159 
02160    if (getFunctionSpace()!=right.getFunctionSpace()) {
02161      if (right.probeInterpolation(getFunctionSpace())) {
02162        //
02163        // an interpolation is required so create a new Data
02164        tempRight=Data(right,this->getFunctionSpace());
02165      } else if (probeInterpolation(right.getFunctionSpace())) {
02166        //
02167        // interpolate onto the RHS function space
02168        Data tempLeft(*this,right.getFunctionSpace());
02169 //        m_data=tempLeft.m_data;
02170        set_m_data(tempLeft.m_data);
02171      }
02172    }
02173    operandCheck(tempRight);
02174    //
02175    // ensure this has the right type for the RHS
02176    typeMatchRight(tempRight);
02177    //
02178    // Need to cast to the concrete types so that the correct binaryOp
02179    // is called.
02180    if (isExpanded()) {
02181      //
02182      // Expanded data will be done in parallel, the right hand side can be
02183      // of any data type
02184      DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
02185      EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
02186      escript::binaryOp(*leftC,*(tempRight.getReady()),operation);
02187    } else if (isTagged()) {
02188      //
02189      // Tagged data is operated on serially, the right hand side can be
02190      // either DataConstant or DataTagged
02191      DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
02192      EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
02193      if (right.isTagged()) {
02194        DataTagged* rightC=dynamic_cast<DataTagged*>(tempRight.m_data.get());
02195        EsysAssert((rightC!=0), "Programming error - casting to DataTagged.");
02196        escript::binaryOp(*leftC,*rightC,operation);
02197      } else {
02198        DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
02199        EsysAssert((rightC!=0), "Programming error - casting to DataConstant.");
02200        escript::binaryOp(*leftC,*rightC,operation);
02201      }
02202    } else if (isConstant()) {
02203      DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
02204      DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
02205      EsysAssert((leftC!=0 && rightC!=0), "Programming error - casting to DataConstant.");
02206      escript::binaryOp(*leftC,*rightC,operation);
02207    }
02208 }
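// Illustrative sketch (assumption, not taken from this header): the in-place
// arithmetic operators are the natural clients of binaryOp, along the lines of
//
//   Data& Data::operator+=(const Data& right)
//   {
//     binaryOp(right, std::plus<double>());   // hypothetical body; the real one,
//     return *this;                           // with protection/lazy handling, is in Data.cpp
//   }
//
// so the DataExpanded/DataTagged/DataConstant dispatch above is written once and
// shared by +=, -=, *= and /=.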
02209 
02217 template <class BinaryFunction>
02218 inline
02219 double
02220 Data::algorithm(BinaryFunction operation, double initial_value) const
02221 {
02222   if (isExpanded()) {
02223     DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
02224     EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
02225     return escript::algorithm(*leftC,operation,initial_value);
02226   } else if (isTagged()) {
02227     DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
02228     EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
02229     return escript::algorithm(*leftC,operation,initial_value);
02230   } else if (isConstant()) {
02231     DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
02232     EsysAssert((leftC!=0), "Programming error - casting to DataConstant.");
02233     return escript::algorithm(*leftC,operation,initial_value);
02234   } else if (isEmpty()) {
02235     throw DataException("Error - Operations not permitted on instances of DataEmpty.");
02236   } else if (isLazy()) {
02237     throw DataException("Error - Operations not permitted on instances of DataLazy.");
02238   } else {
02239     throw DataException("Error - Data encapsulates an unknown type.");
02240   }
02241 }
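// Sketch of intended use (assumption): the scalar reductions declared above,
// e.g. supWorker(), can be expressed through algorithm() with an accumulating
// functor, roughly
//
//   double Data::supWorker() const
//   {
//     return algorithm(FMax(), -std::numeric_limits<double>::max());
//   }
//
// where FMax stands for a running-maximum binary functor (the actual functors
// and initial values used by escript are defined in DataAlgorithm.h / Data.cpp).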
02242 
02251 template <class BinaryFunction>
02252 inline
02253 Data
02254 Data::dp_algorithm(BinaryFunction operation, double initial_value) const
02255 {
02256   if (isEmpty()) {
02257     throw DataException("Error - Operations not permitted on instances of DataEmpty.");
02258   } 
02259   else if (isExpanded()) {
02260     Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
02261     DataExpanded* dataE=dynamic_cast<DataExpanded*>(m_data.get());
02262     DataExpanded* resultE=dynamic_cast<DataExpanded*>(result.m_data.get());
02263     EsysAssert((dataE!=0), "Programming error - casting data to DataExpanded.");
02264     EsysAssert((resultE!=0), "Programming error - casting result to DataExpanded.");
02265     escript::dp_algorithm(*dataE,*resultE,operation,initial_value);
02266     return result;
02267   }
02268   else if (isTagged()) {
02269     DataTagged* dataT=dynamic_cast<DataTagged*>(m_data.get());
02270     EsysAssert((dataT!=0), "Programming error - casting data to DataTagged.");
02271     DataTypes::ValueType defval(1);
02272     defval[0]=0;
02273     DataTagged* resultT=new DataTagged(getFunctionSpace(), DataTypes::scalarShape, defval, dataT);
02274     escript::dp_algorithm(*dataT,*resultT,operation,initial_value);
02275     return Data(resultT);   // note: the Data object now owns the resultT pointer
02276   } 
02277   else if (isConstant()) {
02278     Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
02279     DataConstant* dataC=dynamic_cast<DataConstant*>(m_data.get());
02280     DataConstant* resultC=dynamic_cast<DataConstant*>(result.m_data.get());
02281     EsysAssert((dataC!=0), "Programming error - casting data to DataConstant.");
02282     EsysAssert((resultC!=0), "Programming error - casting result to DataConstant.");
02283     escript::dp_algorithm(*dataC,*resultC,operation,initial_value);
02284     return result;
02285   } else if (isLazy()) {
02286     throw DataException("Error - Operations not permitted on instances of DataLazy.");
02287   } else {
02288     throw DataException("Error - Data encapsulates an unknown type.");
02289   }
02290 }
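// Sketch of intended use (assumption): per-data-point reductions such as
// maxval_nonlazy()/minval_nonlazy() are the natural clients of dp_algorithm(),
// reducing each data point to a scalar while preserving the sample structure:
//
//   Data Data::maxval_nonlazy() const
//   {
//     return dp_algorithm(FMax(), -std::numeric_limits<double>::max());   // hypothetical body
//   }
//
// (the real implementation, including the lazy-aware maxval(), lives in Data.cpp).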
02291 
02299 template <typename BinaryFunction>
02300 inline
02301 Data
02302 C_TensorBinaryOperation(Data const &arg_0,
02303                         Data const &arg_1,
02304                         BinaryFunction operation)
02305 {
02306   if (arg_0.isEmpty() || arg_1.isEmpty())
02307   {
02308      throw DataException("Error - Operations not permitted on instances of DataEmpty.");
02309   }
02310   if (arg_0.isLazy() || arg_1.isLazy())
02311   {
02312      throw DataException("Error - Operations not permitted on lazy data.");
02313   }
02314   // Interpolate if necessary and find an appropriate function space
02315   Data arg_0_Z, arg_1_Z;
02316   if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
02317     if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
02318       arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
02319       arg_1_Z = Data(arg_1);
02320     }
02321     else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
02322       arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
02323       arg_0_Z =Data(arg_0);
02324     }
02325     else {
02326       throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible function spaces.");
02327     }
02328   } else {
02329       arg_0_Z = Data(arg_0);
02330       arg_1_Z = Data(arg_1);
02331   }
02332   // Get rank and shape of inputs
02333   int rank0 = arg_0_Z.getDataPointRank();
02334   int rank1 = arg_1_Z.getDataPointRank();
02335   DataTypes::ShapeType shape0 = arg_0_Z.getDataPointShape();
02336   DataTypes::ShapeType shape1 = arg_1_Z.getDataPointShape();
02337   int size0 = arg_0_Z.getDataPointSize();
02338   int size1 = arg_1_Z.getDataPointSize();
02339   // Declare output Data object
02340   Data res;
02341 
02342   if (shape0 == shape1) {
02343     if (arg_0_Z.isConstant()   && arg_1_Z.isConstant()) {
02344       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());      // DataConstant output
02345       const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
02346       const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
02347       double *ptr_2 = &(res.getDataAtOffsetRW(0));
02348 
02349       tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02350     }
02351     else if (arg_0_Z.isConstant()   && arg_1_Z.isTagged()) {
02352 
02353       // Prepare the DataConstant input
02354       DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
02355 
02356       // Borrow DataTagged input from Data object
02357       DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
02358 
02359       // Prepare a DataTagged output 2
02360       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());      // DataTagged output
02361       res.tag();
02362       DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02363 
02364       // Prepare offset into DataConstant
02365       int offset_0 = tmp_0->getPointOffset(0,0);
02366       const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02367 
02368       // Get the pointers to the actual data
02369       const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
02370       double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02371 
02372       // Compute a result for the default
02373       tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02374       // Compute a result for each tag
02375       const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
02376       DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
02377       for (i=lookup_1.begin();i!=lookup_1.end();i++) {
02378         tmp_2->addTag(i->first);
02379         const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
02380         double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02381 
02382         tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02383       }
02384 
02385     }
02386     else if (arg_0_Z.isConstant()   && arg_1_Z.isExpanded()) {
02387       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
02388       DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
02389       DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
02390       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02391 
02392       int sampleNo_1,dataPointNo_1;
02393       int numSamples_1 = arg_1_Z.getNumSamples();
02394       int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
02395       int offset_0 = tmp_0->getPointOffset(0,0);
02396       res.requireWrite();
02397       #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
02398       for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
02399         for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
02400           int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
02401           int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
02402           const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02403           const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1)); 
02404           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2)); 
02405           tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02406         }
02407       }
02408 
02409     }
02410     else if (arg_0_Z.isTagged()     && arg_1_Z.isConstant()) {
02411       // Borrow DataTagged input from Data object
02412       DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02413 
02414       // Prepare the DataConstant input
02415       DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
02416 
02417       // Prepare a DataTagged output 2
02418       res = Data(0.0, shape0, arg_0_Z.getFunctionSpace());      // DataTagged output
02419       res.tag();
02420       DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02421 
02422       // Prepare offset into DataConstant
02423       int offset_1 = tmp_1->getPointOffset(0,0);
02424 
02425       const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02426       // Get the pointers to the actual data
02427       const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
02428       double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02429       // Compute a result for the default
02430       tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02431       // Compute a result for each tag
02432       const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
02433       DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
02434       for (i=lookup_0.begin();i!=lookup_0.end();i++) {
02435         tmp_2->addTag(i->first);
02436         const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
02437         double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02438         tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02439       }
02440 
02441     }
02442     else if (arg_0_Z.isTagged()     && arg_1_Z.isTagged()) {
02443       // Borrow DataTagged input from Data object
02444       DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02445 
02446       // Borrow DataTagged input from Data object
02447       DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
02448 
02449       // Prepare a DataTagged output 2
02450       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
02451       res.tag();        // DataTagged output
02452       DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02453 
02454       // Get the pointers to the actual data
02455       const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
02456       const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
02457       double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02458 
02459       // Compute a result for the default
02460       tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02461       // Merge the tags
02462       DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
02463       const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
02464       const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
02465       for (i=lookup_0.begin();i!=lookup_0.end();i++) {
02466         tmp_2->addTag(i->first); // use tmp_2 to get correct shape
02467       }
02468       for (i=lookup_1.begin();i!=lookup_1.end();i++) {
02469         tmp_2->addTag(i->first);
02470       }
02471       // Compute a result for each tag
02472       const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
02473       for (i=lookup_2.begin();i!=lookup_2.end();i++) {
02474 
02475         const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
02476         const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
02477         double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02478 
02479         tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02480       }
02481 
02482     }
02483     else if (arg_0_Z.isTagged()     && arg_1_Z.isExpanded()) {
02484       // After finding a common function space above, the two inputs have the same number of samples and data points per sample
02485       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
02486       DataTagged*   tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02487       DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
02488       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02489 
02490       int sampleNo_0,dataPointNo_0;
02491       int numSamples_0 = arg_0_Z.getNumSamples();
02492       int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02493       res.requireWrite();
02494       #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02495       for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02496         int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
02497         const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02498         for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02499           int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
02500           int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02501           const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02502           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02503           tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02504         }
02505       }
02506 
02507     }
02508     else if (arg_0_Z.isExpanded()   && arg_1_Z.isConstant()) {
02509       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
02510       DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
02511       DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
02512       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02513 
02514       int sampleNo_0,dataPointNo_0;
02515       int numSamples_0 = arg_0_Z.getNumSamples();
02516       int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02517       int offset_1 = tmp_1->getPointOffset(0,0);
02518       res.requireWrite();
02519       #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02520       for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02521         for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02522           int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
02523           int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02524 
02525           const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02526           const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02527           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02528 
02529 
02530           tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02531         }
02532       }
02533 
02534     }
02535     else if (arg_0_Z.isExpanded()   && arg_1_Z.isTagged()) {
02536       // After finding a common function space above, the two inputs have the same number of samples and data points per sample
02537       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
02538       DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
02539       DataTagged*   tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
02540       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02541 
02542       int sampleNo_0,dataPointNo_0;
02543       int numSamples_0 = arg_0_Z.getNumSamples();
02544       int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02545       res.requireWrite();
02546       #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02547       for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02548         int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
02549         const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02550         for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02551           int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
02552           int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02553           const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02554           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02555           tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02556         }
02557       }
02558 
02559     }
02560     else if (arg_0_Z.isExpanded()   && arg_1_Z.isExpanded()) {
02561       // After finding a common function space above, the two inputs have the same number of samples and data points per sample
02562       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
02563       DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
02564       DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
02565       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02566 
02567       int sampleNo_0,dataPointNo_0;
02568       int numSamples_0 = arg_0_Z.getNumSamples();
02569       int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02570       res.requireWrite();
02571       #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02572       for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02573       dataPointNo_0=0;
02574 //        for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02575           int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
02576           int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
02577           int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02578           const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02579           const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02580           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02581           tensor_binary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_1, ptr_2, operation);
02582 //       }
02583       }
02584 
02585     }
02586     else {
02587       throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
02588     }
02589 
02590   } else if (0 == rank0) {
02591     if (arg_0_Z.isConstant()   && arg_1_Z.isConstant()) {
02592       res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());      // DataConstant output
02593       const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
02594       const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
02595       double *ptr_2 = &(res.getDataAtOffsetRW(0));
02596       tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02597     }
02598     else if (arg_0_Z.isConstant()   && arg_1_Z.isTagged()) {
02599 
02600       // Prepare the DataConstant input
02601       DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
02602 
02603       // Borrow DataTagged input from Data object
02604       DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
02605 
02606       // Prepare a DataTagged output 2
02607       res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());      // DataTagged output
02608       res.tag();
02609       DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02610 
02611       // Prepare offset into DataConstant
02612       int offset_0 = tmp_0->getPointOffset(0,0);
02613       const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02614 
02615       const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
02616       double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02617 
02618       // Compute a result for the default
02619       tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02620       // Compute a result for each tag
02621       const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
02622       DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
02623       for (i=lookup_1.begin();i!=lookup_1.end();i++) {
02624         tmp_2->addTag(i->first);
02625         const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
02626         double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02627         tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02628       }
02629 
02630     }
02631     else if (arg_0_Z.isConstant()   && arg_1_Z.isExpanded()) {
02632 
02633       res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
02634       DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
02635       DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
02636       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02637 
02638       int sampleNo_1;
02639       int numSamples_1 = arg_1_Z.getNumSamples();
02640       int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
02641       int offset_0 = tmp_0->getPointOffset(0,0);
02642       const double *ptr_src = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02643       double ptr_0 = ptr_src[0];
02644       int size = size1*numDataPointsPerSample_1;
02645       res.requireWrite();
02646       #pragma omp parallel for private(sampleNo_1) schedule(static)
02647       for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
02648 //        for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
02649           int offset_1 = tmp_1->getPointOffset(sampleNo_1,0);
02650           int offset_2 = tmp_2->getPointOffset(sampleNo_1,0);
02651 //          const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02652           const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02653           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02654           tensor_binary_operation(size, ptr_0, ptr_1, ptr_2, operation);
02655 
02656 //        }
02657       }
02658 
02659     }
02660     else if (arg_0_Z.isTagged()     && arg_1_Z.isConstant()) {
02661 
02662       // Borrow DataTagged input from Data object
02663       DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02664 
02665       // Prepare the DataConstant input
02666       DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
02667 
02668       // Prepare a DataTagged output 2
02669       res = Data(0.0, shape1, arg_0_Z.getFunctionSpace());      // DataTagged output
02670       res.tag();
02671       DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02672 
02673       // Prepare offset into DataConstant
02674       int offset_1 = tmp_1->getPointOffset(0,0);
02675       const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02676 
02677       // Get the pointers to the actual data
02678       const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
02679       double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02680 
02681 
02682       // Compute a result for the default
02683       tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02684       // Compute a result for each tag
02685       const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
02686       DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
02687       for (i=lookup_0.begin();i!=lookup_0.end();i++) {
02688         tmp_2->addTag(i->first);
02689         const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
02690         double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02691 
02692         tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02693       }
02694 
02695     }
02696     else if (arg_0_Z.isTagged()     && arg_1_Z.isTagged()) {
02697 
02698       // Borrow DataTagged input from Data object
02699       DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02700 
02701       // Borrow DataTagged input from Data object
02702       DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
02703 
02704       // Prepare a DataTagged output (tmp_2)
02705       res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());
02706       res.tag();        // DataTagged output
02707       DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02708 
02709       // Get the pointers to the actual data
02710       const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
02711       const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
02712       double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02713 
02714       // Compute a result for the default
02715       tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02716       // Merge the tags
02717       DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
02718       const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
02719       const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
02720       for (i=lookup_0.begin();i!=lookup_0.end();i++) {
02721         tmp_2->addTag(i->first); // use tmp_2 to get correct shape
02722       }
02723       for (i=lookup_1.begin();i!=lookup_1.end();i++) {
02724         tmp_2->addTag(i->first);
02725       }
02726       // Compute a result for each tag
02727       const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
02728       for (i=lookup_2.begin();i!=lookup_2.end();i++) {
02729         const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
02730         const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
02731         double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02732 
02733         tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02734       }
02735 
02736     }
02737     else if (arg_0_Z.isTagged()     && arg_1_Z.isExpanded()) {
02738 
02739       // After finding a common function space above, the two inputs have the same number of samples and data points per sample
02740       res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
02741       DataTagged*   tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02742       DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
02743       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02744 
02745       int sampleNo_0,dataPointNo_0;
02746       int numSamples_0 = arg_0_Z.getNumSamples();
02747       int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02748       res.requireWrite();
02749       #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02750       for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02751         int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // all data points in a tagged sample share one value, so use data point 0
02752         const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02753         for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02754           int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
02755           int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02756           const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02757           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02758           tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02759         }
02760       }
02761 
02762     }
02763     else if (arg_0_Z.isExpanded()   && arg_1_Z.isConstant()) {
02764       res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
02765       DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
02766       DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
02767       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02768 
02769       int sampleNo_0,dataPointNo_0;
02770       int numSamples_0 = arg_0_Z.getNumSamples();
02771       int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02772       int offset_1 = tmp_1->getPointOffset(0,0);
02773       res.requireWrite();
02774       #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02775       for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02776         for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02777           int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
02778           int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02779           const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02780           const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02781           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02782           tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02783         }
02784       }
02785 
02786 
02787     }
02788     else if (arg_0_Z.isExpanded()   && arg_1_Z.isTagged()) {
02789 
02790       // After finding a common function space above, the two inputs have the same number of samples and data points per sample
02791       res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
02792       DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
02793       DataTagged*   tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
02794       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02795 
02796       int sampleNo_0,dataPointNo_0;
02797       int numSamples_0 = arg_0_Z.getNumSamples();
02798       int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02799       res.requireWrite();
02800       #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02801       for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02802         int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
02803         const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02804         for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02805           int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
02806           int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02807           const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02808           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02809           tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02810         }
02811       }
02812 
02813     }
02814     else if (arg_0_Z.isExpanded()   && arg_1_Z.isExpanded()) {
02815 
02816       // After finding a common function space above, the two inputs have the same number of samples and data points per sample
02817       res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
02818       DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
02819       DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
02820       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02821 
02822       int sampleNo_0,dataPointNo_0;
02823       int numSamples_0 = arg_0_Z.getNumSamples();
02824       int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02825       res.requireWrite();
02826       #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02827       for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02828         for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02829           int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
02830           int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
02831           int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02832           const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02833           const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02834           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02835           tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02836         }
02837       }
02838 
02839     }
02840     else {
02841       throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
02842     }
02843 
02844   } else if (0 == rank1) { // arg_1 is a scalar (rank 0); the result takes the shape of arg_0
02845     if (arg_0_Z.isConstant()   && arg_1_Z.isConstant()) {
02846       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());      // DataConstant output
02847       const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
02848       const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
02849       double *ptr_2 = &(res.getDataAtOffsetRW(0));
02850       tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02851     }
02852     else if (arg_0_Z.isConstant()   && arg_1_Z.isTagged()) {
02853 
02854       // Prepare the DataConstant input
02855       DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
02856 
02857       // Borrow DataTagged input from Data object
02858       DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
02859 
02860       // Prepare a DataTagged output (tmp_2)
02861       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());      // DataTagged output
02862       res.tag();
02863       DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02864 
02865       // Prepare offset into DataConstant
02866       int offset_0 = tmp_0->getPointOffset(0,0);
02867       const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02868 
02869       // Get the pointers to the actual data
02870       const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
02871       double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02872 
02873       // Compute a result for the default
02874       tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02875       // Compute a result for each tag
02876       const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
02877       DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
02878       for (i=lookup_1.begin();i!=lookup_1.end();i++) {
02879         tmp_2->addTag(i->first);
02880         const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
02881         double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02882         tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02883       }
02884     }
02885     else if (arg_0_Z.isConstant()   && arg_1_Z.isExpanded()) {
02886 
02887       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
02888       DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
02889       DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
02890       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02891 
02892       int sampleNo_1,dataPointNo_1;
02893       int numSamples_1 = arg_1_Z.getNumSamples();
02894       int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
02895       int offset_0 = tmp_0->getPointOffset(0,0);
02896       res.requireWrite();
02897       #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
02898       for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
02899         for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
02900           int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
02901           int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
02902           const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02903           const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02904           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02905           tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02906         }
02907       }
02908 
02909     }
02910     else if (arg_0_Z.isTagged()     && arg_1_Z.isConstant()) {
02911 
02912       // Borrow DataTagged input from Data object
02913       DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02914 
02915       // Prepare the DataConstant input
02916       DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
02917 
02918       // Prepare a DataTagged output (tmp_2)
02919       res = Data(0.0, shape0, arg_0_Z.getFunctionSpace());      // DataTagged output
02920       res.tag();
02921       DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02922 
02923       // Prepare offset into DataConstant
02924       int offset_1 = tmp_1->getPointOffset(0,0);
02925       const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02926       // Get the pointers to the actual data
02927       const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
02928       double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02929       // Compute a result for the default
02930       tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02931       // Compute a result for each tag
02932       const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
02933       DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
02934       for (i=lookup_0.begin();i!=lookup_0.end();i++) {
02935         tmp_2->addTag(i->first);
02936         const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
02937         double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02938         tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02939       }
02940 
02941     }
02942     else if (arg_0_Z.isTagged()     && arg_1_Z.isTagged()) {
02943 
02944       // Borrow DataTagged input from Data object
02945       DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02946 
02947       // Borrow DataTagged input from Data object
02948       DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
02949 
02950       // Prepare a DataTagged output (tmp_2)
02951       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
02952       res.tag();        // DataTagged output
02953       DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02954 
02955       // Get the pointers to the actual data
02956       const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
02957       const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
02958       double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02959 
02960       // Compute a result for the default
02961       tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02962       // Merge the tags
02963       DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
02964       const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
02965       const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
02966       for (i=lookup_0.begin();i!=lookup_0.end();i++) {
02967         tmp_2->addTag(i->first); // use tmp_2 to get correct shape
02968       }
02969       for (i=lookup_1.begin();i!=lookup_1.end();i++) {
02970         tmp_2->addTag(i->first);
02971       }
02972       // Compute a result for each tag
02973       const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
02974       for (i=lookup_2.begin();i!=lookup_2.end();i++) {
02975         const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
02976         const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
02977         double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02978         tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02979       }
02980 
02981     }
02982     else if (arg_0_Z.isTagged()     && arg_1_Z.isExpanded()) {
02983 
02984       // After finding a common function space above, the two inputs have the same number of samples and data points per sample
02985       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
02986       DataTagged*   tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02987       DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
02988       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02989 
02990       int sampleNo_0,dataPointNo_0;
02991       int numSamples_0 = arg_0_Z.getNumSamples();
02992       int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02993       res.requireWrite();
02994       #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02995       for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02996         int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // all data points in a tagged sample share one value, so use data point 0
02997         const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02998         for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02999           int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
03000           int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
03001           const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
03002           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
03003           tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
03004         }
03005       }
03006 
03007     }
03008     else if (arg_0_Z.isExpanded()   && arg_1_Z.isConstant()) {
03009       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
03010       DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
03011       DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
03012       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
03013 
03014       int sampleNo_0;
03015       int numSamples_0 = arg_0_Z.getNumSamples();
03016       int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
03017       int offset_1 = tmp_1->getPointOffset(0,0);
03018       const double *ptr_src = &(arg_1_Z.getDataAtOffsetRO(offset_1));
03019       double ptr_1 = ptr_src[0];   // scalar value of the constant argument (not a pointer, despite the name)
03020       int size = size0 * numDataPointsPerSample_0;
03021       res.requireWrite();
03022       #pragma omp parallel for private(sampleNo_0) schedule(static)
03023       for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
03024 //        for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
03025           int offset_0 = tmp_0->getPointOffset(sampleNo_0,0);
03026           int offset_2 = tmp_2->getPointOffset(sampleNo_0,0);
03027           const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
03028 //          const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
03029           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
03030           tensor_binary_operation(size, ptr_0, ptr_1, ptr_2, operation);
03031 //        }
03032       }
03033 
03034 
03035     }
03036     else if (arg_0_Z.isExpanded()   && arg_1_Z.isTagged()) {
03037 
03038       // After finding a common function space above, the two inputs have the same number of samples and data points per sample
03039       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
03040       DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
03041       DataTagged*   tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
03042       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
03043 
03044       int sampleNo_0,dataPointNo_0;
03045       int numSamples_0 = arg_0_Z.getNumSamples();
03046       int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
03047       res.requireWrite();
03048       #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
03049       for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
03050         int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
03051         const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
03052         for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
03053           int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
03054           int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
03055           const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
03056           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
03057           tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
03058         }
03059       }
03060 
03061     }
03062     else if (arg_0_Z.isExpanded()   && arg_1_Z.isExpanded()) {
03063 
03064       // After finding a common function space above, the two inputs have the same number of samples and data points per sample
03065       res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
03066       DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
03067       DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
03068       DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
03069 
03070       int sampleNo_0,dataPointNo_0;
03071       int numSamples_0 = arg_0_Z.getNumSamples();
03072       int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
03073       res.requireWrite();
03074       #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
03075       for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
03076         for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
03077           int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
03078           int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
03079           int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
03080           const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
03081           const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
03082           double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
03083           tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
03084         }
03085       }
03086 
03087     }
03088     else {
03089       throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
03090     }
03091 
03092   } else {
03093     throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible shapes");
03094   }
03095 
03096   return res;
03097 }
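
// --- Illustrative sketch (hypothetical helpers, not part of the original header) ---
// C_TensorBinaryOperation dispatches on the representation (constant / tagged /
// expanded) and the rank of its two arguments, then applies a binary functor
// point-wise. Any callable of the form double(double,double) (cf. Data::BinaryDFunPtr)
// should be usable as the operation; for example, an element-wise sum could be
// formed roughly as follows.
inline double example_plus(double x, double y) { return x + y; }        // hypothetical helper

inline Data exampleTensorSum(const Data& a, const Data& b)              // hypothetical helper
{
  return C_TensorBinaryOperation(a, b, example_plus);
}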
03098 
03099 template <typename UnaryFunction>
03100 Data
03101 C_TensorUnaryOperation(Data const &arg_0,
03102                        UnaryFunction operation)
03103 {
03104   if (arg_0.isEmpty())  // reject DataEmpty before doing any work
03105   {
03106      throw DataException("Error - Operations not permitted on instances of DataEmpty.");
03107   }
03108   if (arg_0.isLazy())
03109   {
03110      throw DataException("Error - Operations not permitted on lazy data.");
03111   }
03112   // Only one argument, so no interpolation is required; work on a copy of the input Data object
03113   Data arg_0_Z = Data(arg_0);
03114 
03115   // Get rank and shape of inputs
03116   const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
03117   int size0 = arg_0_Z.getDataPointSize();
03118 
03119   // Declare output Data object
03120   Data res;
03121 
03122   if (arg_0_Z.isConstant()) {
03123     res = Data(0.0, shape0, arg_0_Z.getFunctionSpace());      // DataConstant output
03124     const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
03125     double *ptr_2 = &(res.getDataAtOffsetRW(0));
03126     tensor_unary_operation(size0, ptr_0, ptr_2, operation);
03127   }
03128   else if (arg_0_Z.isTagged()) {
03129 
03130     // Borrow DataTagged input from Data object
03131     DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
03132 
03133     // Prepare a DataTagged output (tmp_2)
03134     res = Data(0.0, shape0, arg_0_Z.getFunctionSpace());   // DataTagged output
03135     res.tag();
03136     DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
03137 
03138     // Get the pointers to the actual data
03139     const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
03140     double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
03141     // Compute a result for the default
03142     tensor_unary_operation(size0, ptr_0, ptr_2, operation);
03143     // Compute a result for each tag
03144     const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
03145     DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
03146     for (i=lookup_0.begin();i!=lookup_0.end();i++) {
03147       tmp_2->addTag(i->first);
03148       const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
03149       double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
03150       tensor_unary_operation(size0, ptr_0, ptr_2, operation);
03151     }
03152 
03153   }
03154   else if (arg_0_Z.isExpanded()) {
03155 
03156     res = Data(0.0, shape0, arg_0_Z.getFunctionSpace(),true); // DataExpanded output
03157     DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
03158     DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
03159 
03160     int sampleNo_0,dataPointNo_0;
03161     int numSamples_0 = arg_0_Z.getNumSamples();
03162     int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
03163     #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
03164     for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
03165       dataPointNo_0 = 0;  // the whole sample is processed in one call below, so only the offset of data point 0 is needed
03166 //      for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
03167         int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
03168         int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
03169         const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
03170         double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
03171         tensor_unary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_2, operation);
03172 //      }
03173     }
03174   }
03175   else {
03176     throw DataException("Error - C_TensorUnaryOperation: unknown combination of inputs");
03177   }
03178 
03179   return res;
03180 }
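
// --- Illustrative sketch (hypothetical helpers, not part of the original header) ---
// C_TensorUnaryOperation applies a functor of the form double(double) (cf.
// Data::UnaryDFunPtr) to every data point of its argument, preserving the argument's
// representation (constant, tagged or expanded). A point-wise negation might look
// roughly like this.
inline double example_negate(double x) { return -x; }                   // hypothetical helper

inline Data exampleTensorNegate(const Data& a)                           // hypothetical helper
{
  return C_TensorUnaryOperation(a, example_negate);
}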
03181 
03182 }
03183 #endif