Coprocessing example

This example demonstrates how the co-processing library can be used with a C++ based simulation code. The ParaView/CoProcessing/Adaptors/FortranAdaptors directory contains code that is useful for integrating C or Fortran based simulation codes with the co-processing library. Note that this example requires MPI to be available on your system. The executable takes a Python coprocessing script and the number of time steps to run for, e.g. mpirun -np 2 ./CoProcessingExample coprocessing.py 100 (the script name and process count here are only illustrative). Remember to set up your system environment properly; see [[1]] for details.


CoProcessingExample.cxx

<source lang="cpp">

#include "vtkCPDataDescription.h"
#include "vtkCPInputDataDescription.h"
#include "vtkCPProcessor.h"
#include "vtkCPPythonScriptPipeline.h"
#include "vtkElevationFilter.h"
#include "vtkPolyData.h"
#include "vtkSmartPointer.h"
#include "vtkSphereSource.h"
#include "vtkXMLUnstructuredGridReader.h"

#include <mpi.h>
#include <cmath>    // for fabs() and sin() in DataGenerator::GetNext()
#include <cstdio>   // for printf()
#include <cstdlib>  // for atoi()
#include <string>

// DataGenerator stands in for a real simulation code: each call to GetNext()
// produces a sphere (offset per MPI rank) whose radius oscillates with the
// time step index, run through an elevation filter.
class DataGenerator
{
public:

 DataGenerator()
   {
   this->Sphere = vtkSmartPointer<vtkSphereSource>::New();
   this->Sphere->SetThetaResolution(30);
   this->Sphere->SetPhiResolution(30);
   int procId;
   MPI_Comm_rank(MPI_COMM_WORLD, &procId);
   this->Sphere->SetCenter(procId*4.0, 0, 0);
   this->Elevation = vtkSmartPointer<vtkElevationFilter>::New();
   this->Elevation->SetInputConnection(this->Sphere->GetOutputPort());
   this->Index = 0;
   }
 vtkSmartPointer<vtkPolyData> GetNext()
   {
   double radius = fabs(sin(0.1 * this->Index));
   this->Index++;
   this->Sphere->SetRadius(1.0 + radius);
   this->Elevation->Update();
   vtkSmartPointer<vtkPolyData> ret = vtkSmartPointer<vtkPolyData>::New();
   ret->DeepCopy(this->Elevation->GetOutput());
   return ret;
   }

protected:

 int Index;
 vtkSmartPointer<vtkSphereSource> Sphere;
 vtkSmartPointer<vtkElevationFilter> Elevation;

};

int main(int argc, char* argv[]) {

 if (argc < 3)
   {
   printf("Usage: %s <python coprocessing script> <number of time steps>\n", argv[0]);
   return 1;
   }
 // we assume that this is done in parallel
 MPI_Init(&argc, &argv);
 std::string cpPythonFile = argv[1];
 int nSteps = atoi(argv[2]);
 vtkCPProcessor* processor = vtkCPProcessor::New();
 processor->Initialize();
 vtkCPPythonScriptPipeline* pipeline = vtkCPPythonScriptPipeline::New();
 // read the coprocessing python file
 if(pipeline->Initialize(cpPythonFile.c_str()) == 0)
   {
   cout << "Problem reading the python script.\n";
   return 1;
   }
 processor->AddPipeline(pipeline);
 pipeline->Delete();
 if (nSteps == 0)
   {
   return 0;
   }
 // create a data source, typically this will come from the adaptor
 // but here we use generator to create it ourselves
 DataGenerator generator;
 // do coprocessing
 double tStart = 0.0;
 double tEnd = 1.0;
 double stepSize = (tEnd - tStart)/nSteps;
 vtkCPDataDescription* dataDesc = vtkCPDataDescription::New();
 dataDesc->AddInput("input");
 for (int i = 0; i < nSteps; ++i)
   {
   double currentTime = tStart + stepSize*i;
   // set the current time and time step
   dataDesc->SetTimeData(currentTime, i);
   // check if the script says we should do coprocessing now
   if(processor->RequestDataDescription(dataDesc) != 0)
     {
     // we are going to do coprocessing so use generator to
     // create our grid at this timestep and provide it to
     // the coprocessing library
     vtkSmartPointer<vtkDataObject> dataObject =
       generator.GetNext();
     dataDesc->GetInputDescriptionByName("input")->SetGrid(dataObject);
     processor->CoProcess(dataDesc);
     }
   }
 dataDesc->Delete();
 processor->Finalize();
 processor->Delete();
 MPI_Finalize();
 return 0;

}
</source>

CMakeLists.txt

<source lang="cmake">
cmake_minimum_required(VERSION 2.6)

PROJECT(CoProcessingExample)

FIND_PACKAGE(ParaView REQUIRED)
INCLUDE(${PARAVIEW_USE_FILE})

ADD_EXECUTABLE(CoProcessingExample CoProcessingExample.cxx)
TARGET_LINK_LIBRARIES(CoProcessingExample vtkCoProcessorImplementation)
</source>

Python Scripts

The first Python script below simply writes out the full results of the example. This corresponds to a simulation run on a coarse grid that is used to set up coprocessing runs for larger grids, where writing out the entire simulation output can be computationally prohibitive.

<source lang="python">
try: paraview.simple
except: from paraview.simple import *

def RequestDataDescription(datadescription):

   "Callback to populate the request for current timestep"
   timestep = datadescription.GetTimeStep()
   input_name = 'input'
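    # with a frequency of 1, the full mesh and all fields are requested at every time step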
   if (timestep % 1 == 0) :
       datadescription.GetInputDescriptionByName(input_name).AllFieldsOn()
       datadescription.GetInputDescriptionByName(input_name).GenerateMeshOn()
   else:
       datadescription.GetInputDescriptionByName(input_name).AllFieldsOff()
       datadescription.GetInputDescriptionByName(input_name).GenerateMeshOff()

def DoCoProcessing(datadescription):

   "Callback to do co-processing for current timestep"
   cp_writers = []
   timestep = datadescription.GetTimeStep()
   grid = CreateProducer( datadescription, "input" )
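    # write the full input grid at every time step (cpFrequency 1) as a partitioned
    # polydata file; "%t" is replaced by the time step number in the loop below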
   ParallelPolyDataWriter1 = CreateWriter( XMLPPolyDataWriter, "input_grid_%t.pvtp", 1, cp_writers )
   for writer in cp_writers:
       if timestep % writer.cpFrequency == 0:
           writer.FileName = writer.cpFileName.replace("%t", str(timestep))
           writer.UpdatePipeline()
   # explicitly delete the proxies -- we do it this way to avoid problems with prototypes
   tobedeleted = GetNextProxyToDelete()
   while tobedeleted != None:
       Delete(tobedeleted)
       tobedeleted = GetNextProxyToDelete()

def GetNextProxyToDelete():
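    # Return the next user-created proxy that is safe to delete, skipping prototype,
    # pq_helper_proxies and timekeeper proxies; returns None when nothing is left.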

   proxyiterator = servermanager.ProxyIterator()
   for proxy in proxyiterator:
       group = proxyiterator.GetGroup()
       if group.find("prototypes") != -1:
           continue
       if group != 'timekeeper' and group.find("pq_helper_proxies") == -1 :
           return proxy
   return None

def CreateProducer(datadescription, gridname):

   "Creates a producer proxy for the grid"
   if not datadescription.GetInputDescriptionByName(gridname):
       raise RuntimeError, "Simulation input name '%s' does not exist" % gridname
   grid = datadescription.GetInputDescriptionByName(gridname).GetGrid()
   producer = TrivialProducer()
   producer.GetClientSideObject().SetOutput(grid)
   producer.UpdatePipeline()
   return producer

def CreateWriter(proxy_ctor, filename, freq, cp_writers):
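    # Create the writer proxy and attach the coprocessing frequency and file-name
    # pattern as extra attributes (cpFrequency, cpFileName) so DoCoProcessing can
    # decide when and where to write.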

   writer = proxy_ctor()
   writer.FileName = filename
   writer.add_attribute("cpFrequency", freq)
   writer.add_attribute("cpFileName", filename)
   cp_writers.append(writer)
   return writer

</source>
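
The writers created by CreateWriter above carry two extra attributes, cpFrequency and cpFileName, which DoCoProcessing uses to decide whether to write at a given time step and how to name the output file. The snippet below is only a small illustrative sketch of that frequency check and "%t" substitution; the helper name output_filename is hypothetical and not part of the coprocessing library.

<source lang="python">
# Illustration only: the same frequency / "%t" file-name logic that
# DoCoProcessing applies to each writer above. output_filename is a
# hypothetical helper, not part of the coprocessing library.
def output_filename(pattern, timestep, frequency):
    if timestep % frequency != 0:
        return None  # nothing is written at this time step
    return pattern.replace("%t", str(timestep))

print(output_filename("input_grid_%t.pvtp", 3, 1))   # -> input_grid_3.pvtp
print(output_filename("Cut_%t.pvtu", 3, 5))          # -> None (skipped)
print(output_filename("Cut_%t.pvtu", 10, 5))         # -> Cut_10.pvtu
</source>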


This second script is still fairly simple and only applies a clip to the input from the simulation code. It demonstrates, however, how desired results can be obtained while performing coprocessing only at specified time steps.

<source lang="python">
try: paraview.simple
except: from paraview.simple import *

def RequestDataDescription(datadescription):

   "Callback to populate the request for current timestep"
   timestep = datadescription.GetTimeStep()
   input_name = 'input'
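    # request the mesh and fields only every fifth time step; nothing is needed otherwise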
   if (timestep % 5 == 0) :
       datadescription.GetInputDescriptionByName(input_name).AllFieldsOn()
       datadescription.GetInputDescriptionByName(input_name).GenerateMeshOn()
   else:
       datadescription.GetInputDescriptionByName(input_name).AllFieldsOff()
       datadescription.GetInputDescriptionByName(input_name).GenerateMeshOff()

def DoCoProcessing(datadescription):

   "Callback to do co-processing for current timestep"
   cp_writers = []
   timestep = datadescription.GetTimeStep()
   grid = CreateProducer( datadescription, "input" )
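    # clip the elevation-colored sphere surface with the plane y = 0
    # (normal [0, 1, 0], passing through roughly x = 2)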
   Clip2 = Clip( guiName="Clip2", InsideOut=0, UseValueAsOffset=0, Scalars=['POINTS', 'Elevation'], Value=0.0, ClipType="Plane" )
   Clip2.ClipType.Normal = [0.0, 1.0, 0.0]
   Clip2.ClipType.Origin = [1.9999999105930328, 0.0, 0.0]
   Clip2.ClipType.Offset = 0.0
   ParallelUnstructuredGridWriter2 = CreateWriter( XMLPUnstructuredGridWriter, "Cut_%t.pvtu", 5, cp_writers )
   for writer in cp_writers:
       if timestep % writer.cpFrequency == 0:
           writer.FileName = writer.cpFileName.replace("%t", str(timestep))
           writer.UpdatePipeline()
   # explicitly delete the proxies -- we do it this way to avoid problems with prototypes
   tobedeleted = GetNextProxyToDelete()
   while tobedeleted != None:
       Delete(tobedeleted)
       tobedeleted = GetNextProxyToDelete()

def GetNextProxyToDelete():

   proxyiterator = servermanager.ProxyIterator()
   for proxy in proxyiterator:
       group = proxyiterator.GetGroup()
       if group.find("prototypes") != -1:
           continue
       if group != 'timekeeper' and group.find("pq_helper_proxies") == -1 :
           return proxy
   return None

def CreateProducer(datadescription, gridname):

   "Creates a producer proxy for the grid"
   if not datadescription.GetInputDescriptionByName(gridname):
       raise RuntimeError, "Simulation input name '%s' does not exist" % gridname
   grid = datadescription.GetInputDescriptionByName(gridname).GetGrid()
   producer = TrivialProducer()
   producer.GetClientSideObject().SetOutput(grid)
   producer.UpdatePipeline()
   return producer

def CreateWriter(proxy_ctor, filename, freq, cp_writers):

   writer = proxy_ctor()
   writer.FileName = filename
   writer.add_attribute("cpFrequency", freq)
   writer.add_attribute("cpFileName", filename)
   cp_writers.append(writer)
   return writer

</source>
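
The files produced by these scripts (input_grid_<timestep>.pvtp from the first script and Cut_<timestep>.pvtu from the second) are ordinary partitioned VTK files and can be loaded back into ParaView for inspection. Below is a minimal sketch, assuming a pvpython session and that the run actually produced the file named here (the file name is only an example):

<source lang="python">
# Minimal sketch: open one of the generated files in pvpython for a quick look.
# The file name is an example; substitute one actually written by your run.
from paraview.simple import OpenDataFile, Show, Render

reader = OpenDataFile("Cut_10.pvtu")
Show(reader)
Render()
</source>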