ForEachVoxelFunction.py
1 #! /usr/bin/env python
2 
3 from __future__ import absolute_import, print_function, unicode_literals
4 
5 import sys
6 
7 # ------------------------------------------------------------------------------
def to_nary_string(arity):
    """Return the generic '<n>-ary' spelling for the given arity number."""
    return '{0}-ary'.format(arity)
10 
11 # ------------------------------------------------------------------------------
def string_to_arity(arity):
    """Map an arity name such as 'unary' or 'binary' (case-insensitive)
    to its number, or return -1 if the name is not recognized."""
    known = {
        'unary':      1,
        'binary':     2,
        'ternary':    3,
        'quaternary': 4,
        'quinary':    5,
        'senary':     6,
        'septenary':  7,
        'octary':     8,
        'nonary':     9,
    }
    return known.get(arity.lower(), -1)
24 
25 # ------------------------------------------------------------------------------
def arity_to_string(arity):
    """Map arity numbers 1-9 to their names ('unary', ..., 'nonary');
    any other arity falls back to the generic '<n>-ary' spelling."""
    known = {
        1: 'unary',
        2: 'binary',
        3: 'ternary',
        4: 'quaternary',
        5: 'quinary',
        6: 'senary',
        7: 'septenary',
        8: 'octary',
        9: 'nonary',
    }
    name = known.get(arity)
    return name if name is not None else to_nary_string(arity)
37 
38 # ------------------------------------------------------------------------------
def to_valid_symbol_name(s):
    """Drop hyphens from *s* so it can be used inside a C++ identifier."""
    return ''.join(s.split('-'))
41 
42 # ------------------------------------------------------------------------------
def get_source_name(arity):
    """Compose the base name of the generated header for the given arity,
    e.g. 3 -> 'mirtkForEachTernaryVoxelFunction'."""
    nary = to_valid_symbol_name(arity_to_string(arity)).title()
    return 'mirtkForEach' + nary + 'VoxelFunction'
45 
46 # ------------------------------------------------------------------------------
# parse command-line arguments
if len(sys.argv) != 3:
    print("usage: " + sys.argv[0] + ' <arity> <file>')
    sys.exit(1)
try:
    arity = int(sys.argv[1])
except ValueError:
    # argument is not a number; try to interpret it as an arity name
    arity = string_to_arity(sys.argv[1])
if arity < 1:
    sys.stderr.write('Input argument must be either arity as positive number or a string such as "unary" or "binary"!\n')
    sys.exit(1)
# open output file for writing
# NOTE: the previous "if not f:" check after open() was dead code -- open()
# raises on failure instead of returning a falsy value -- and the script did
# not terminate on the intended error path; catch the exception and exit.
try:
    f = open(sys.argv[2], 'w')
except IOError:
    sys.stderr.write('Failed to open file ' + sys.argv[2] + '!\n')
    sys.exit(1)
61 
62 # ------------------------------------------------------------------------------
# source file header
source_name = get_source_name(arity)
# Write the license banner; it prominently warns that the generated header is
# produced by this script during CMake configuration and must not be edited
# by hand.
f.write("""/*
 * Medical Image Registration ToolKit (MIRTK)
 *
 * Copyright 2013-2015 Imperial College London
 * Copyright 2013-2015 Andreas Schuh
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * ATTENTION: This source file has been automatically generated using the code
 * generator mirtkForEachVoxelFunction.py! This generator is
 * invoked during CMake configuration of the build system when this
 * source file is missing from the project.
 *
 * DO NOT modify this file manually. Instead, modify the code
 * generator, remove any existing mirtkForEach*VoxelFunction.h
 * header file from the include/ directory and then re-run CMake.
 * This will invoke the code generator to re-generate the source files.
 */

""")
# include guard
# source_name[5:] strips the leading 'mirtk' prefix, so the guard macro is
# e.g. MIRTK_ForEachUnaryVoxelFunction_H
f.write('#ifndef MIRTK_' + source_name[5:] + '_H\n')
f.write('#define MIRTK_' + source_name[5:] + '_H\n')
# include statements
f.write("""
#include "mirtk/Stream.h"
#include "mirtk/VoxelFunction.h"


namespace mirtk {

""")
107 
108 # ------------------------------------------------------------------------------
109 # generate ForEach function for each combination of const and non-const image arguments
110 def gencode(arity, num_const):
111  arity_string = arity_to_string(arity).title()
112  cfg = {}
113  # use last image as reference for image size and inside check as it is usually
114  # the output image
115  cfg['refim'] = 'im' + str(arity)
116  cfg['refp'] = 'p' + str(arity)
117  # other settings
118  if num_const == 0: cfg['num_const_comment'] = str(arity) + ' non-const'
119  elif num_const == arity: cfg['num_const_comment'] = str(arity) + ' const'
120  else: cfg['num_const_comment'] = str(num_const) + ' const, ' + str(arity - num_const) + ' non-const'
121  if arity > 1: cfg['num_const_comment'] += ' images'
122  else: cfg['num_const_comment'] += ' image'
123  cfg['class_name1'] = arity_string + 'ForEachVoxelBody'
124  cfg['class_name2'] = arity_string + 'ForEachVoxelIfBody'
125  if num_const > 0:
126  cfg['class_name1'] += '_'
127  cfg['class_name2'] += '_'
128  if num_const < arity:
129  cfg['class_name1'] += str(num_const)
130  cfg['class_name2'] += str(num_const)
131  cfg['class_name1'] += 'Const'
132  cfg['class_name2'] += 'Const'
133  cfg['member_declaration'] = ''
134  cfg['class_T'] = ''
135  cfg['T'] = ''
136  cfg['constructor_args1'] = ''
137  cfg['constructor_args2'] = ''
138  cfg['init_list'] = ''
139  cfg['copy_list'] = ''
140  cfg['init_pointers'] = ''
141  cfg['init_pointers_1D'] = ''
142  cfg['init_pointers_2D'] = ''
143  cfg['init_pointers_3D'] = ''
144  cfg['preincrement_pointers'] = ''
145  cfg['inc_pointers_col'] = ''
146  cfg['inc_pointers_row'] = ''
147  cfg['inc_pointers_page'] = ''
148  cfg['pargs'] = ''
149  cfg['imparams_by_reference'] = ''
150  cfg['imargs'] = ''
151  cfg['impargs'] = ''
152  for i in range(1, arity+1):
153  n = str(i)
154  if i > 1:
155  cfg['member_declaration'] += '\n' + (' ' * 2)
156  cfg['class_T'] += ', '
157  cfg['T'] += ', '
158  cfg['constructor_args1'] += ',\n' + (' ' * (2 + len(cfg['class_name1']) + 1))
159  cfg['constructor_args2'] += ',\n' + (' ' * (2 + len(cfg['class_name2']) + 1))
160  cfg['init_list'] += ', '
161  cfg['copy_list'] += ', '
162  cfg['init_pointers'] += '\n' + (' ' * 4)
163  cfg['init_pointers_1D'] += '\n' + (' ' * 4)
164  cfg['init_pointers_2D'] += '\n' + (' ' * 4)
165  cfg['init_pointers_3D'] += '\n' + (' ' * 4)
166  cfg['preincrement_pointers'] += ', '
167  cfg['inc_pointers_col'] += ', '
168  cfg['inc_pointers_row'] += ', '
169  cfg['inc_pointers_page'] += ', '
170  cfg['pargs'] += ', '
171  cfg['imparams_by_reference'] += ', '
172  cfg['imargs'] += ', '
173  cfg['impargs'] += ', '
174  if num_const > 0:
175  if i <= num_const:
176  cfg['member_declaration'] += 'const '
177  cfg['constructor_args1'] += 'const '
178  cfg['constructor_args2'] += 'const '
179  cfg['init_pointers'] += 'const '
180  cfg['init_pointers_1D'] += 'const '
181  cfg['init_pointers_2D'] += 'const '
182  cfg['init_pointers_3D'] += 'const '
183  cfg['imparams_by_reference'] += 'const '
184  else:
185  cfg['member_declaration'] += ' '
186  cfg['constructor_args1'] += ' '
187  cfg['constructor_args2'] += ' '
188  cfg['init_pointers'] += ' '
189  cfg['init_pointers_1D'] += ' '
190  cfg['init_pointers_2D'] += ' '
191  cfg['init_pointers_3D'] += ' '
192  cfg['member_declaration'] += 'GenericImage<T' + n + '> &im' + n + ';'
193  cfg['imparams_by_reference'] += 'GenericImage<T' + n + '> &im' + n
194  cfg['class_T'] += 'class T' + n
195  cfg['T'] += 'T' + n
196  cfg['constructor_args1'] += 'GenericImage<T' + n + '> &im' + n
197  cfg['constructor_args2'] += 'GenericImage<T' + n + '> &im' + n
198  cfg['init_list'] += 'im' + n + '(im' + n + ')'
199  cfg['copy_list'] += 'im' + n + '(o.im' + n + ')'
200  cfg['init_pointers'] += 'T' + n + ' *p' + n + ' = im' + n + '.IsEmpty() ? NULL : im' + n + '.GetPointerToVoxels();'
201  cfg['init_pointers_1D'] += 'T' + n + ' *p' + n + ' = im' + n + '.IsEmpty() ? NULL : im' + n + '.GetPointerToVoxels() + re.begin();'
202  cfg['init_pointers_2D'] += 'T' + n + ' *p' + n + ' = im' + n + '.IsEmpty() ? NULL : im' + n + '.GetPointerToVoxels(bi, bj, this->_k, this->_l);'
203  cfg['init_pointers_3D'] += 'T' + n + ' *p' + n + ' = im' + n + '.IsEmpty() ? NULL : im' + n + '.GetPointerToVoxels(bi, bj, bk, this->_l);'
204  cfg['preincrement_pointers'] += '++p' + n
205  cfg['inc_pointers_col'] += 'p' + n + ' += 1'
206  cfg['inc_pointers_row'] += 'p' + n + ' += s1'
207  cfg['inc_pointers_page'] += 'p' + n + ' += s2'
208  cfg['pargs'] += 'p' + n
209  cfg['imargs'] += 'im' + n
210  cfg['impargs'] += '*im' + n
211  cfg['constructor_args1'] += ',\n' + (' ' * (2 + len(cfg['class_name1']) + 1))
212  cfg['constructor_args2'] += ',\n' + (' ' * (2 + len(cfg['class_name2']) + 1))
213  cfg['constructor_args1'] += 'VoxelFunc &vf'
214  cfg['constructor_args2'] += 'VoxelFunc &vf, OutsideFunc &of'
215  cfg['imparams_by_pointer'] = cfg['imparams_by_reference'].replace('&', '*')
216  cfg['assert_is_not_reduction'] = "if (VoxelFunc::IsReduction()) _foreach%svoxelfunction_must_not_be_reduction();" % arity_string.lower()
217  cfg['assert_neither_is_not_reduction'] = "if (VoxelFunc::IsReduction() || OutsideFunc::IsReduction()) _foreach%svoxelfunction_must_not_be_reduction();" % arity_string.lower()
218  f.write("""
219 // =============================================================================
220 // %(num_const_comment)s
221 // =============================================================================
222 
223 // -----------------------------------------------------------------------------
224 /**
225  * ForEachVoxel body for voxel function of %(num_const_comment)s
226  */
227 template <%(class_T)s, class VoxelFunc>
228 struct %(class_name1)s : public ForEachVoxelBody<VoxelFunc>
229 {
230  %(member_declaration)s
231 
232  /// Constructor
233  %(class_name1)s(%(constructor_args1)s)
234  :
235  ForEachVoxelBody<VoxelFunc>(vf, im1.Attributes()), %(init_list)s
236  {}
237 
238  /// Copy constructor
239  %(class_name1)s(const %(class_name1)s &o)
240  :
241  ForEachVoxelBody<VoxelFunc>(o), %(copy_list)s
242  {}
243 
244  /// Split constructor
245  %(class_name1)s(%(class_name1)s &o, split s)
246  :
247  ForEachVoxelBody<VoxelFunc>(o, s), %(copy_list)s
248  {}
249 
250  /// Process entire image
251  void operator ()(const ImageAttributes &attr) const
252  {
253  %(init_pointers)s
254 
255  const int T = (attr._dt ? attr._t : 1);
256 
257  for (int l = 0; l < T; ++l)
258  for (int k = 0; k < attr._z; ++k)
259  for (int j = 0; j < attr._y; ++j)
260  for (int i = 0; i < attr._x; ++i, %(preincrement_pointers)s) {
261  // const_cast such that voxel functions need only implement
262  // non-const operator() which is required for parallel_reduce
263  const_cast<%(class_name1)s *>(this)->_VoxelFunc(i, j, k, l, %(pargs)s);
264  }
265  }
266 
267  /// Process image region using linear index
268  void operator ()(const blocked_range<int> &re) const
269  {
270  %(init_pointers_1D)s
271 
272  for (int idx = re.begin(); idx < re.end(); ++idx, %(inc_pointers_col)s) {
273  // const_cast such that voxel functions need only implement
274  // non-const operator() which is required for parallel_reduce
275  const_cast<%(class_name1)s *>(this)->_VoxelFunc(%(refim)s, idx, %(pargs)s);
276  }
277  }
278 
279  /// Process 2D image region
280  void operator ()(const blocked_range2d<int> &re) const
281  {
282  const int bi = re.cols().begin();
283  const int bj = re.rows().begin();
284  const int ei = re.cols().end();
285  const int ej = re.rows().end();
286 
287  const int s1 = %(refim)s.GetX() - (ei - bi);
288 
289  %(init_pointers_2D)s
290 
291  for (int j = bj; j < ej; ++j, %(inc_pointers_row)s)
292  for (int i = bi; i < ei; ++i, %(inc_pointers_col)s) {
293  // const_cast such that voxel functions need only implement
294  // non-const operator() which is required for parallel_reduce
295  const_cast<%(class_name1)s *>(this)->_VoxelFunc(i, j, this->_k, this->_l, %(pargs)s);
296  }
297  }
298 
299  /// Process 3D image region
300  void operator ()(const blocked_range3d<int> &re) const
301  {
302  const int bi = re.cols ().begin();
303  const int bj = re.rows ().begin();
304  const int bk = re.pages().begin();
305  const int ei = re.cols ().end();
306  const int ej = re.rows ().end();
307  const int ek = re.pages().end();
308 
309  const int s1 = %(refim)s.GetX() - (ei - bi);
310  const int s2 = (%(refim)s.GetY() - (ej - bj)) * %(refim)s.GetX();
311 
312  %(init_pointers_3D)s
313 
314  for (int k = bk; k < ek; ++k, %(inc_pointers_page)s)
315  for (int j = bj; j < ej; ++j, %(inc_pointers_row)s)
316  for (int i = bi; i < ei; ++i, %(inc_pointers_col)s) {
317  // const_cast such that voxel functions need only implement
318  // non-const operator() which is required for parallel_reduce
319  const_cast<%(class_name1)s *>(this)->_VoxelFunc(i, j, k, this->_l, %(pargs)s);
320  }
321  }
322 };
323 
324 // -----------------------------------------------------------------------------
325 /**
326  * ForEachVoxel body for inside and outside unary voxel function of %(num_const_comment)s
327  */
328 template <%(class_T)s,
329  class VoxelFunc, class OutsideFunc = NaryVoxelFunction::NOP,
330  class Domain = ForEachVoxelDomain::Foreground>
331 struct %(class_name2)s : public ForEachVoxelIfBody<VoxelFunc, OutsideFunc>
332 {
333  %(member_declaration)s
334 
335  /// Constructor
336  %(class_name2)s(%(constructor_args2)s)
337  :
338  ForEachVoxelIfBody<VoxelFunc, OutsideFunc>(vf, of, im1.Attributes()), %(init_list)s
339  {}
340 
341  /// Copy constructor
342  %(class_name2)s(const %(class_name2)s &o)
343  :
344  ForEachVoxelIfBody<VoxelFunc, OutsideFunc>(o), %(copy_list)s
345  {}
346 
347  /// Split constructor
348  %(class_name2)s(%(class_name2)s &o, split s)
349  :
350  ForEachVoxelIfBody<VoxelFunc, OutsideFunc>(o, s), %(copy_list)s
351  {}
352 
353  /// Process entire image
354  void operator ()(const ImageAttributes &attr) const
355  {
356  %(init_pointers)s
357 
358  const int T = (attr._dt ? attr._t : 1);
359 
360  for (int l = 0; l < T; ++l)
361  for (int k = 0; k < attr._z; ++k)
362  for (int j = 0; j < attr._y; ++j)
363  for (int i = 0; i < attr._x; ++i, %(preincrement_pointers)s) {
364  if (Domain::IsInside(%(refim)s, i, j, k, l, %(refp)s)) {
365  // const_cast such that voxel functions need only implement
366  // non-const operator() which is required for parallel_reduce
367  const_cast<%(class_name2)s *>(this)->_VoxelFunc (i, j, k, l, %(pargs)s);
368  } else const_cast<%(class_name2)s *>(this)->_OutsideFunc(i, j, k, l, %(pargs)s);
369  }
370  }
371 
372  /// Process image region using linear index
373  void operator ()(const blocked_range<int> &re) const
374  {
375  %(init_pointers_1D)s
376 
377  for (int idx = re.begin(); idx < re.end(); ++idx, %(inc_pointers_col)s) {
378  if (Domain::IsInside(%(refim)s, idx, %(refp)s)) {
379  // const_cast such that voxel functions need only implement
380  // non-const operator() which is required for parallel_reduce
381  const_cast<%(class_name2)s *>(this)->_VoxelFunc (%(refim)s, idx, %(pargs)s);
382  } else const_cast<%(class_name2)s *>(this)->_OutsideFunc(%(refim)s, idx, %(pargs)s);
383  }
384  }
385 
386  /// Process 2D image region
387  void operator ()(const blocked_range2d<int> &re) const
388  {
389  const int bi = re.cols().begin();
390  const int bj = re.rows().begin();
391  const int ei = re.cols().end();
392  const int ej = re.rows().end();
393 
394  const int s1 = %(refim)s.GetX() - (ei - bi);
395 
396  %(init_pointers_2D)s
397 
398  for (int j = bj; j < ej; ++j, %(inc_pointers_row)s)
399  for (int i = bi; i < ei; ++i, %(inc_pointers_col)s) {
400  if (Domain::IsInside(%(refim)s, i, j, this->_k, this->_l, %(refp)s)) {
401  // const_cast such that voxel functions need only implement
402  // non-const operator() which is required for parallel_reduce
403  const_cast<%(class_name2)s *>(this)->_VoxelFunc (i, j, this->_k, this->_l, %(pargs)s);
404  } else const_cast<%(class_name2)s *>(this)->_OutsideFunc(i, j, this->_k, this->_l, %(pargs)s);
405  }
406  }
407 
408  /// Process 3D image region
409  void operator ()(const blocked_range3d<int> &re) const
410  {
411  const int bi = re.cols ().begin();
412  const int bj = re.rows ().begin();
413  const int bk = re.pages().begin();
414  const int ei = re.cols ().end();
415  const int ej = re.rows ().end();
416  const int ek = re.pages().end();
417 
418  const int s1 = %(refim)s.GetX() - (ei - bi);
419  const int s2 = (%(refim)s.GetY() - (ej - bj)) * %(refim)s.GetX();
420 
421  %(init_pointers_3D)s
422 
423  for (int k = bk; k < ek; ++k, %(inc_pointers_page)s)
424  for (int j = bj; j < ej; ++j, %(inc_pointers_row)s)
425  for (int i = bi; i < ei; ++i, %(inc_pointers_col)s) {
426  if (Domain::IsInside(%(refim)s, i, j, k, this->_l, %(refp)s)) {
427  // const_cast such that voxel functions need only implement
428  // non-const operator() which is required for parallel_reduce
429  const_cast<%(class_name2)s *>(this)->_VoxelFunc (i, j, k, this->_l, %(pargs)s);
430  } else const_cast<%(class_name2)s *>(this)->_OutsideFunc(i, j, k, this->_l, %(pargs)s);
431  }
432  }
433 };
434 
435 // -----------------------------------------------------------------------------
436 // ForEachVoxel
437 // -----------------------------------------------------------------------------
438 
439 //
440 // Image arguments by pointer
441 //
442 
443 // -----------------------------------------------------------------------------
444 template <%(class_T)s, class VoxelFunc>
445 void ForEachScalar(%(imparams_by_pointer)s, VoxelFunc &vf)
446 {
447  %(class_name1)s<%(T)s, VoxelFunc> body(%(impargs)s, vf);
448  blocked_range<int> re(0, %(refim)s->GetNumberOfVoxels());
449  body(re);
450  vf.join(body._VoxelFunc);
451 }
452 
453 // -----------------------------------------------------------------------------
454 template <%(class_T)s, class VoxelFunc>
455 void ForEachScalar(VoxelFunc vf, %(imparams_by_pointer)s)
456 {
457  %(assert_is_not_reduction)s
458  ForEachScalar(%(impargs)s, vf);
459 }
460 
461 // -----------------------------------------------------------------------------
462 template <%(class_T)s, class VoxelFunc>
463 void ForEachVoxel(%(imparams_by_pointer)s, VoxelFunc &vf)
464 {
465  if (%(refim)s->GetTSize()) {
466  ForEachScalar(%(impargs)s, vf);
467  } else {
468  %(class_name1)s<%(T)s, VoxelFunc> body(%(impargs)s, vf);
469  blocked_range<int> re(0, %(refim)s->GetNumberOfVoxels() / %(refim)s->GetT());
470  body(re);
471  vf.join(body._VoxelFunc);
472  }
473 }
474 
475 // -----------------------------------------------------------------------------
476 template <%(class_T)s, class VoxelFunc>
477 void ForEachVoxel(VoxelFunc vf, %(imparams_by_pointer)s)
478 {
479  %(assert_is_not_reduction)s
480  ForEachVoxel(%(impargs)s, vf);
481 }
482 
483 // -----------------------------------------------------------------------------
484 template <%(class_T)s, class VoxelFunc>
485 void ForEachVoxel(const ImageAttributes &attr, %(imparams_by_pointer)s, VoxelFunc &vf)
486 {
487  %(class_name1)s<%(T)s, VoxelFunc> body(%(impargs)s, vf);
488  body(attr);
489  vf.join(body._VoxelFunc);
490 }
491 
492 // -----------------------------------------------------------------------------
493 template <%(class_T)s, class VoxelFunc>
494 void ForEachVoxel(VoxelFunc vf, const ImageAttributes &attr, %(imparams_by_pointer)s)
495 {
496  %(assert_is_not_reduction)s
497  ForEachVoxel(attr, %(impargs)s, vf);
498 }
499 
500 // -----------------------------------------------------------------------------
501 template <%(class_T)s, class VoxelFunc>
502 void ForEachVoxel(const blocked_range<int> &re, %(imparams_by_pointer)s, VoxelFunc &vf)
503 {
504  %(class_name1)s<%(T)s, VoxelFunc> body(%(impargs)s, vf);
505  body(re);
506  vf.join(body._VoxelFunc);
507 }
508 
509 // -----------------------------------------------------------------------------
510 template <%(class_T)s, class VoxelFunc>
511 void ForEachVoxel(VoxelFunc vf, const blocked_range<int> &re, %(imparams_by_pointer)s)
512 {
513  %(assert_is_not_reduction)s
514  ForEachVoxel(re, %(impargs)s, vf);
515 }
516 
517 // -----------------------------------------------------------------------------
518 template <%(class_T)s, class VoxelFunc>
519 void ForEachVoxel(const blocked_range2d<int> &re, %(imparams_by_pointer)s, VoxelFunc &vf)
520 {
521  %(class_name1)s<%(T)s, VoxelFunc> body(%(impargs)s, vf);
522  body(re);
523  vf.join(body._VoxelFunc);
524 }
525 
526 // -----------------------------------------------------------------------------
527 template <%(class_T)s, class VoxelFunc>
528 void ForEachVoxel(VoxelFunc vf, const blocked_range2d<int> &re, %(imparams_by_pointer)s)
529 {
530  %(assert_is_not_reduction)s
531  ForEachVoxel(re, %(impargs)s, vf);
532 }
533 
534 // -----------------------------------------------------------------------------
535 template <%(class_T)s, class VoxelFunc>
536 void ForEachVoxel(const blocked_range3d<int> &re, %(imparams_by_pointer)s, VoxelFunc &vf)
537 {
538  %(class_name1)s<%(T)s, VoxelFunc> body(%(impargs)s, vf);
539  body(re);
540  vf.join(body._VoxelFunc);
541 }
542 
543 // -----------------------------------------------------------------------------
544 template <%(class_T)s, class VoxelFunc>
545 void ForEachVoxel(VoxelFunc vf, const blocked_range3d<int> &re, %(imparams_by_pointer)s)
546 {
547  %(assert_is_not_reduction)s
548  ForEachVoxel(re, %(impargs)s, vf);
549 }
550 
551 //
552 // Image arguments by reference
553 //
554 
555 // -----------------------------------------------------------------------------
556 template <%(class_T)s, class VoxelFunc>
557 void ForEachScalar(%(imparams_by_reference)s, VoxelFunc &vf)
558 {
559  %(class_name1)s<%(T)s, VoxelFunc> body(%(imargs)s, vf);
560  blocked_range<int> re(0, %(refim)s.GetNumberOfVoxels());
561  body(re);
562  vf.join(body._VoxelFunc);
563 }
564 
565 // -----------------------------------------------------------------------------
566 template <%(class_T)s, class VoxelFunc>
567 void ForEachScalar(VoxelFunc vf, %(imparams_by_reference)s)
568 {
569  %(assert_is_not_reduction)s
570  ForEachScalar(%(imargs)s, vf);
571 }
572 
573 // -----------------------------------------------------------------------------
574 template <%(class_T)s, class VoxelFunc>
575 void ForEachVoxel(%(imparams_by_reference)s, VoxelFunc &vf)
576 {
577  if (%(refim)s.GetTSize()) {
578  ForEachScalar(%(imargs)s, vf);
579  } else {
580  %(class_name1)s<%(T)s, VoxelFunc> body(%(imargs)s, vf);
581  blocked_range<int> re(0, %(refim)s.GetNumberOfVoxels() / %(refim)s.GetT());
582  body(re);
583  vf.join(body._VoxelFunc);
584  }
585 }
586 
587 // -----------------------------------------------------------------------------
588 template <%(class_T)s, class VoxelFunc>
589 void ForEachVoxel(VoxelFunc vf, %(imparams_by_reference)s)
590 {
591  %(assert_is_not_reduction)s
592  ForEachVoxel(%(imargs)s, vf);
593 }
594 
595 // -----------------------------------------------------------------------------
596 template <%(class_T)s, class VoxelFunc>
597 void ForEachVoxel(const ImageAttributes &attr, %(imparams_by_reference)s, VoxelFunc &vf)
598 {
599  %(class_name1)s<%(T)s, VoxelFunc> body(%(imargs)s, vf);
600  body(attr);
601  vf.join(body._VoxelFunc);
602 }
603 
604 // -----------------------------------------------------------------------------
605 template <%(class_T)s, class VoxelFunc>
606 void ForEachVoxel(VoxelFunc vf, const ImageAttributes &attr, %(imparams_by_reference)s)
607 {
608  %(assert_is_not_reduction)s
609  ForEachVoxel(attr, %(imargs)s, vf);
610 }
611 
612 // -----------------------------------------------------------------------------
613 template <%(class_T)s, class VoxelFunc>
614 void ForEachVoxel(const blocked_range<int> &re, %(imparams_by_reference)s, VoxelFunc &vf)
615 {
616  %(class_name1)s<%(T)s, VoxelFunc> body(%(imargs)s, vf);
617  body(re);
618  vf.join(body._VoxelFunc);
619 }
620 
621 // -----------------------------------------------------------------------------
622 template <%(class_T)s, class VoxelFunc>
623 void ForEachVoxel(VoxelFunc vf, const blocked_range<int> &re, %(imparams_by_reference)s)
624 {
625  %(assert_is_not_reduction)s
626  ForEachVoxel(re, %(imargs)s, vf);
627 }
628 
629 // -----------------------------------------------------------------------------
630 template <%(class_T)s, class VoxelFunc>
631 void ForEachVoxel(const blocked_range2d<int> &re, %(imparams_by_reference)s, VoxelFunc &vf)
632 {
633  %(class_name1)s<%(T)s, VoxelFunc> body(%(imargs)s, vf);
634  body(re);
635  vf.join(body._VoxelFunc);
636 }
637 
638 // -----------------------------------------------------------------------------
639 template <%(class_T)s, class VoxelFunc>
640 void ForEachVoxel(VoxelFunc vf, const blocked_range2d<int> &re, %(imparams_by_reference)s)
641 {
642  %(assert_is_not_reduction)s
643  ForEachVoxel(re, %(imargs)s, vf);
644 }
645 
646 // -----------------------------------------------------------------------------
647 template <%(class_T)s, class VoxelFunc>
648 void ForEachVoxel(const blocked_range3d<int> &re, %(imparams_by_reference)s, VoxelFunc &vf)
649 {
650  %(class_name1)s<%(T)s, VoxelFunc> body(%(imargs)s, vf);
651  body(re);
652  vf.join(body._VoxelFunc);
653 }
654 
655 // -----------------------------------------------------------------------------
656 template <%(class_T)s, class VoxelFunc>
657 void ForEachVoxel(VoxelFunc vf, const blocked_range3d<int> &re, %(imparams_by_reference)s)
658 {
659  %(assert_is_not_reduction)s
660  ForEachVoxel(re, %(imargs)s, vf);
661 }
662 
663 // -----------------------------------------------------------------------------
664 // ForEachVoxelIf
665 // -----------------------------------------------------------------------------
666 
667 //
668 // Image arguments by pointer
669 //
670 
671 // -----------------------------------------------------------------------------
672 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
673 void ForEachScalarIf(%(imparams_by_pointer)s, VoxelFunc &vf, OutsideFunc &of)
674 {
675  %(class_name2)s<%(T)s, VoxelFunc, OutsideFunc, Domain> body(%(impargs)s, vf, of);
676  blocked_range<int> re(0, %(refim)s->GetNumberOfVoxels());
677  body(re);
678  vf.join(body._VoxelFunc);
679  of.join(body._OutsideFunc);
680 }
681 
682 // -----------------------------------------------------------------------------
683 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
684 void ForEachScalarIf(VoxelFunc vf, OutsideFunc of, %(imparams_by_pointer)s)
685 {
686  %(assert_neither_is_not_reduction)s
687  ForEachScalarIf<Domain>(%(impargs)s, vf, of);
688 }
689 
690 // -----------------------------------------------------------------------------
691 template <class Domain, %(class_T)s, class VoxelFunc>
692 void ForEachScalarIf(%(imparams_by_pointer)s, VoxelFunc &vf)
693 {
694  NaryVoxelFunction::NOP of;
695  ForEachScalarIf<Domain>(%(impargs)s, vf, of);
696 }
697 
698 // -----------------------------------------------------------------------------
699 template <class Domain, %(class_T)s, class VoxelFunc>
700 void ForEachScalarIf(VoxelFunc vf, %(imparams_by_pointer)s)
701 {
702  %(assert_is_not_reduction)s
703  ForEachScalarIf<Domain>(%(impargs)s, vf);
704 }
705 
706 // -----------------------------------------------------------------------------
707 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
708 void ForEachVoxelIf(%(imparams_by_pointer)s, VoxelFunc &vf, OutsideFunc &of)
709 {
710  if (%(refim)s->GetTSize()) {
711  ForEachScalarIf<Domain>(%(impargs)s, vf, of);
712  } else {
713  %(class_name2)s<%(T)s, VoxelFunc, OutsideFunc, Domain> body(%(impargs)s, vf, of);
714  blocked_range<int> re(0, %(refim)s->GetNumberOfVoxels() / %(refim)s->GetT());
715  body(re);
716  vf.join(body._VoxelFunc);
717  of.join(body._OutsideFunc);
718  }
719 }
720 
721 // -----------------------------------------------------------------------------
722 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
723 void ForEachVoxelIf(VoxelFunc vf, OutsideFunc of, %(imparams_by_pointer)s)
724 {
725  %(assert_neither_is_not_reduction)s
726  ForEachVoxelIf<Domain>(%(impargs)s, vf, of);
727 }
728 
729 // -----------------------------------------------------------------------------
730 template <class Domain, %(class_T)s, class VoxelFunc>
731 void ForEachVoxelIf(%(imparams_by_pointer)s, VoxelFunc &vf)
732 {
733  NaryVoxelFunction::NOP of;
734  ForEachVoxelIf<Domain>(%(impargs)s, vf, of);
735 }
736 
737 // -----------------------------------------------------------------------------
738 template <class Domain, %(class_T)s, class VoxelFunc>
739 void ForEachVoxelIf(VoxelFunc vf, %(imparams_by_pointer)s)
740 {
741  %(assert_is_not_reduction)s
742  ForEachVoxelIf<Domain>(%(impargs)s, vf);
743 }
744 
745 // -----------------------------------------------------------------------------
746 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
747 void ForEachVoxelIf(const ImageAttributes &attr, %(imparams_by_pointer)s, VoxelFunc &vf, OutsideFunc &of)
748 {
749  %(class_name2)s<%(T)s, VoxelFunc, OutsideFunc, Domain> body(%(impargs)s, vf, of);
750  body(attr);
751  vf.join(body._VoxelFunc);
752  of.join(body._OutsideFunc);
753 }
754 
755 // -----------------------------------------------------------------------------
756 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
757 void ForEachVoxelIf(VoxelFunc vf, OutsideFunc of, const ImageAttributes &attr, %(imparams_by_pointer)s)
758 {
759  %(assert_neither_is_not_reduction)s
760  ForEachVoxelIf<Domain>(attr, %(impargs)s, vf, of);
761 }
762 
763 // -----------------------------------------------------------------------------
764 template <class Domain, %(class_T)s, class VoxelFunc>
765 void ForEachVoxelIf(const ImageAttributes &attr, %(imparams_by_pointer)s, VoxelFunc &vf)
766 {
767  NaryVoxelFunction::NOP of;
768  ForEachVoxelIf<Domain>(attr, %(impargs)s, vf, of);
769 }
770 
771 // -----------------------------------------------------------------------------
772 template <class Domain, %(class_T)s, class VoxelFunc>
773 void ForEachVoxelIf(VoxelFunc vf, const ImageAttributes &attr, %(imparams_by_pointer)s)
774 {
775  %(assert_is_not_reduction)s
776  ForEachVoxelIf<Domain>(attr, %(impargs)s, vf);
777 }
778 
779 // -----------------------------------------------------------------------------
780 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
781 void ForEachVoxelIf(const blocked_range<int> &re, %(imparams_by_pointer)s, VoxelFunc &vf, OutsideFunc &of)
782 {
783  %(class_name2)s<%(T)s, VoxelFunc, OutsideFunc, Domain> body(%(impargs)s, vf, of);
784  body(re);
785  vf.join(body._VoxelFunc);
786  of.join(body._OutsideFunc);
787 }
788 
789 // -----------------------------------------------------------------------------
790 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
791 void ForEachVoxelIf(VoxelFunc vf, OutsideFunc of, const blocked_range<int> &re, %(imparams_by_pointer)s)
792 {
793  %(assert_neither_is_not_reduction)s
794  ForEachVoxelIf<Domain>(re, %(impargs)s, vf, of);
795 }
796 
797 // -----------------------------------------------------------------------------
798 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
799 void ForEachVoxelIf(const blocked_range2d<int> &re, %(imparams_by_pointer)s, VoxelFunc &vf, OutsideFunc &of)
800 {
801  %(class_name2)s<%(T)s, VoxelFunc, OutsideFunc, Domain> body(%(impargs)s, vf, of);
802  body(re);
803  vf.join(body._VoxelFunc);
804  of.join(body._OutsideFunc);
805 }
806 
807 // -----------------------------------------------------------------------------
808 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
809 void ForEachVoxelIf(VoxelFunc vf, OutsideFunc of, const blocked_range2d<int> &re, %(imparams_by_pointer)s)
810 {
811  %(assert_neither_is_not_reduction)s
812  ForEachVoxelIf<Domain>(re, %(impargs)s, vf, of);
813 }
814 
815 // -----------------------------------------------------------------------------
816 template <class Domain, %(class_T)s, class VoxelFunc>
817 void ForEachVoxelIf(const blocked_range2d<int> &re, %(imparams_by_pointer)s, VoxelFunc &vf)
818 {
819  NaryVoxelFunction::NOP of;
820  ForEachVoxelIf<Domain>(re, %(impargs)s, vf, of);
821 }
822 
823 // -----------------------------------------------------------------------------
824 template <class Domain, %(class_T)s, class VoxelFunc>
825 void ForEachVoxelIf(VoxelFunc vf, const blocked_range2d<int> &re, %(imparams_by_pointer)s)
826 {
827  %(assert_is_not_reduction)s
828  ForEachVoxelIf<Domain>(re, %(impargs)s, vf);
829 }
830 
831 // -----------------------------------------------------------------------------
832 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
833 void ForEachVoxelIf(const blocked_range3d<int> &re, %(imparams_by_pointer)s, VoxelFunc &vf, OutsideFunc &of)
834 {
835  %(class_name2)s<%(T)s, VoxelFunc, OutsideFunc, Domain> body(%(impargs)s, vf, of);
836  body(re);
837  vf.join(body._VoxelFunc);
838  of.join(body._OutsideFunc);
839 }
840 
841 // -----------------------------------------------------------------------------
842 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
843 void ForEachVoxelIf(VoxelFunc vf, OutsideFunc of, const blocked_range3d<int> &re, %(imparams_by_pointer)s)
844 {
845  %(assert_neither_is_not_reduction)s
846  ForEachVoxelIf<Domain>(%(impargs)s, vf, of);
847 }
848 
849 // -----------------------------------------------------------------------------
850 template <class Domain, %(class_T)s, class VoxelFunc>
851 void ForEachVoxelIf(const blocked_range3d<int> &re, %(imparams_by_pointer)s, VoxelFunc &vf)
852 {
853  NaryVoxelFunction::NOP of;
854  ForEachVoxelIf<Domain>(re, %(impargs)s, vf, of);
855 }
856 
857 // -----------------------------------------------------------------------------
858 template <class Domain, %(class_T)s, class VoxelFunc>
859 void ForEachVoxelIf(VoxelFunc vf, const blocked_range3d<int> &re, %(imparams_by_pointer)s)
860 {
861  %(assert_is_not_reduction)s
862  ForEachVoxelIf<Domain>(re, %(impargs)s, vf);
863 }
864 
865 //
866 // Image arguments by reference
867 //
868 
869 // -----------------------------------------------------------------------------
870 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
871 void ForEachScalarIf(%(imparams_by_reference)s, VoxelFunc &vf, OutsideFunc &of)
872 {
873  %(class_name2)s<%(T)s, VoxelFunc, OutsideFunc, Domain> body(%(imargs)s, vf, of);
874  blocked_range<int> re(0, %(refim)s.GetNumberOfVoxels());
875  body(re);
876  vf.join(body._VoxelFunc);
877  of.join(body._OutsideFunc);
878 }
879 
880 // -----------------------------------------------------------------------------
881 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
882 void ForEachScalarIf(VoxelFunc vf, OutsideFunc of, %(imparams_by_reference)s)
883 {
884  %(assert_neither_is_not_reduction)s
885  ForEachScalarIf<Domain>(%(imargs)s, vf, of);
886 }
887 
888 // -----------------------------------------------------------------------------
889 template <class Domain, %(class_T)s, class VoxelFunc>
890 void ForEachScalarIf(%(imparams_by_reference)s, VoxelFunc &vf)
891 {
892  NaryVoxelFunction::NOP of;
893  ForEachScalarIf<Domain>(%(imargs)s, vf, of);
894 }
895 
896 // -----------------------------------------------------------------------------
897 template <class Domain, %(class_T)s, class VoxelFunc>
898 void ForEachScalarIf(VoxelFunc vf, %(imparams_by_reference)s)
899 {
900  %(assert_is_not_reduction)s
901  ForEachScalarIf<Domain>(%(imargs)s, vf);
902 }
903 
904 // -----------------------------------------------------------------------------
905 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
906 void ForEachVoxelIf(%(imparams_by_reference)s, VoxelFunc &vf, OutsideFunc &of)
907 {
908  if (%(refim)s.GetTSize()) {
909  ForEachVoxelIf<Domain>(%(imargs)s, vf, of);
910  } else {
911  %(class_name2)s<%(T)s, VoxelFunc, OutsideFunc, Domain> body(%(imargs)s, vf, of);
912  blocked_range<int> re(0, %(refim)s.GetNumberOfVoxels() / %(refim)s.GetT());
913  body(re);
914  vf.join(body._VoxelFunc);
915  of.join(body._OutsideFunc);
916  }
917 }
918 
919 // -----------------------------------------------------------------------------
920 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
921 void ForEachVoxelIf(VoxelFunc vf, OutsideFunc of, %(imparams_by_reference)s)
922 {
923  %(assert_neither_is_not_reduction)s
924  ForEachVoxelIf<Domain>(%(imargs)s, vf, of);
925 }
926 
927 // -----------------------------------------------------------------------------
928 template <class Domain, %(class_T)s, class VoxelFunc>
929 void ForEachVoxelIf(%(imparams_by_reference)s, VoxelFunc &vf)
930 {
931  NaryVoxelFunction::NOP of;
932  ForEachVoxelIf<Domain>(%(imargs)s, vf, of);
933 }
934 
935 // -----------------------------------------------------------------------------
936 template <class Domain, %(class_T)s, class VoxelFunc>
937 void ForEachVoxelIf(VoxelFunc vf, %(imparams_by_reference)s)
938 {
939  %(assert_is_not_reduction)s
940  ForEachVoxelIf<Domain>(%(imargs)s, vf);
941 }
942 
943 // -----------------------------------------------------------------------------
944 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
945 void ForEachVoxelIf(const ImageAttributes &attr, %(imparams_by_reference)s, VoxelFunc &vf, OutsideFunc &of)
946 {
947  %(class_name2)s<%(T)s, VoxelFunc, OutsideFunc, Domain> body(%(imargs)s, vf, of);
948  body(attr);
949  vf.join(body._VoxelFunc);
950  of.join(body._OutsideFunc);
951 }
952 
953 // -----------------------------------------------------------------------------
954 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
955 void ForEachVoxelIf(VoxelFunc vf, OutsideFunc of, const ImageAttributes &attr, %(imparams_by_reference)s)
956 {
957  %(assert_neither_is_not_reduction)s
958  ForEachVoxelIf<Domain>(attr, %(imargs)s, vf, of);
959 }
960 
961 // -----------------------------------------------------------------------------
962 template <class Domain, %(class_T)s, class VoxelFunc>
963 void ForEachVoxelIf(const ImageAttributes &attr, %(imparams_by_reference)s, VoxelFunc &vf)
964 {
965  NaryVoxelFunction::NOP of;
966  ForEachVoxelIf<Domain>(attr, %(imargs)s, vf, of);
967 }
968 
969 // -----------------------------------------------------------------------------
970 template <class Domain, %(class_T)s, class VoxelFunc>
971 void ForEachVoxelIf(VoxelFunc vf, const ImageAttributes &attr, %(imparams_by_reference)s)
972 {
973  %(assert_is_not_reduction)s
974  ForEachVoxelIf<Domain>(attr, %(imargs)s, vf);
975 }
976 
977 // -----------------------------------------------------------------------------
978 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
979 void ForEachVoxelIf(const blocked_range<int> &re, %(imparams_by_reference)s, VoxelFunc &vf, OutsideFunc &of)
980 {
981  %(class_name2)s<%(T)s, VoxelFunc, OutsideFunc, Domain> body(%(imargs)s, vf, of);
982  body(re);
983  vf.join(body._VoxelFunc);
984  of.join(body._OutsideFunc);
985 }
986 
987 // -----------------------------------------------------------------------------
988 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
989 void ForEachVoxelIf(VoxelFunc vf, OutsideFunc of, const blocked_range<int> &re, %(imparams_by_reference)s)
990 {
991  %(assert_neither_is_not_reduction)s
992  ForEachVoxelIf<Domain>(re, %(imargs)s, vf, of);
993 }
994 
995 // -----------------------------------------------------------------------------
996 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
997 void ForEachVoxelIf(const blocked_range2d<int> &re, %(imparams_by_reference)s, VoxelFunc &vf, OutsideFunc &of)
998 {
999  %(class_name2)s<%(T)s, VoxelFunc, OutsideFunc, Domain> body(%(imargs)s, vf, of);
1000  body(re);
1001  vf.join(body._VoxelFunc);
1002  of.join(body._OutsideFunc);
1003 }
1004 
1005 // -----------------------------------------------------------------------------
1006 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1007 void ForEachVoxelIf(VoxelFunc vf, OutsideFunc of, const blocked_range2d<int> &re, %(imparams_by_reference)s)
1008 {
1009  %(assert_neither_is_not_reduction)s
1010  ForEachVoxelIf<Domain>(re, %(imargs)s, vf, of);
1011 }
1012 
1013 // -----------------------------------------------------------------------------
1014 template <class Domain, %(class_T)s, class VoxelFunc>
1015 void ForEachVoxelIf(const blocked_range2d<int> &re, %(imparams_by_reference)s, VoxelFunc &vf)
1016 {
1017  NaryVoxelFunction::NOP of;
1018  ForEachVoxelIf<Domain>(re, %(imargs)s, vf, of);
1019 }
1020 
1021 // -----------------------------------------------------------------------------
1022 template <class Domain, %(class_T)s, class VoxelFunc>
1023 void ForEachVoxelIf(VoxelFunc vf, const blocked_range2d<int> &re, %(imparams_by_reference)s)
1024 {
1025  %(assert_is_not_reduction)s
1026  ForEachVoxelIf<Domain>(re, %(imargs)s, vf);
1027 }
1028 
1029 // -----------------------------------------------------------------------------
1030 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1031 void ForEachVoxelIf(const blocked_range3d<int> &re, %(imparams_by_reference)s, VoxelFunc &vf, OutsideFunc &of)
1032 {
1033  %(class_name2)s<%(T)s, VoxelFunc, OutsideFunc, Domain> body(%(imargs)s, vf, of);
1034  body(re);
1035  vf.join(body._VoxelFunc);
1036  of.join(body._OutsideFunc);
1037 }
1038 
1039 // -----------------------------------------------------------------------------
1040 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1041 void ForEachVoxelIf(VoxelFunc vf, OutsideFunc of, const blocked_range3d<int> &re, %(imparams_by_reference)s)
1042 {
1043  %(assert_neither_is_not_reduction)s
1044  ForEachVoxelIf<Domain>(re, %(imargs)s, vf, of);
1045 }
1046 
1047 // -----------------------------------------------------------------------------
1048 template <class Domain, %(class_T)s, class VoxelFunc>
1049 void ForEachVoxelIf(const blocked_range3d<int> &re, %(imparams_by_reference)s, VoxelFunc &vf)
1050 {
1051  NaryVoxelFunction::NOP of;
1052  ForEachVoxelIf<Domain>(re, %(imargs)s, vf, of);
1053 }
1054 
1055 // -----------------------------------------------------------------------------
1056 template <class Domain, %(class_T)s, class VoxelFunc>
1057 void ForEachVoxelIf(VoxelFunc vf, const blocked_range3d<int> &re, %(imparams_by_reference)s)
1058 {
1059  %(assert_is_not_reduction)s
1060  ForEachVoxelIf<Domain>(re, %(imargs)s, vf);
1061 }
1062 
1063 // -----------------------------------------------------------------------------
1064 // ParallelForEachVoxel
1065 // -----------------------------------------------------------------------------
1066 
1067 //
1068 // Image arguments by pointer
1069 //
1070 
1071 // -----------------------------------------------------------------------------
1072 template <%(class_T)s, class VoxelFunc>
1073 void ParallelForEachScalar(%(imparams_by_pointer)s, VoxelFunc &vf)
1074 {
1075  %(class_name1)s<%(T)s, VoxelFunc> body(%(impargs)s, vf);
1076  blocked_range<int> re(0, %(refim)s->GetNumberOfVoxels());
1077  if (VoxelFunc::IsReduction()) { parallel_reduce(re, body); vf.join(body._VoxelFunc); }
1078  else parallel_for (re, body);
1079 }
1080 
1081 // -----------------------------------------------------------------------------
1082 template <%(class_T)s, class VoxelFunc>
1083 void ParallelForEachScalar(VoxelFunc vf, %(imparams_by_pointer)s)
1084 {
1085  %(assert_is_not_reduction)s
1086  ParallelForEachScalar(%(impargs)s, vf);
1087 }
1088 
1089 // -----------------------------------------------------------------------------
1090 template <%(class_T)s, class VoxelFunc>
1091 void ParallelForEachVoxel(%(imparams_by_pointer)s, VoxelFunc &vf)
1092 {
1093  if (%(refim)s->GetTSize()) {
1094  ParallelForEachScalar(%(impargs)s, vf);
1095  } else {
1096  %(class_name1)s<%(T)s, VoxelFunc> body(%(impargs)s, vf);
1097  blocked_range<int> re(0, %(refim)s->GetNumberOfVoxels() / %(refim)s->GetT());
1098  if (VoxelFunc::IsReduction()) { parallel_reduce(re, body); vf.join(body._VoxelFunc); }
1099  else parallel_for (re, body);
1100  }
1101 }
1102 
1103 // -----------------------------------------------------------------------------
1104 template <%(class_T)s, class VoxelFunc>
1105 void ParallelForEachVoxel(VoxelFunc vf, %(imparams_by_pointer)s)
1106 {
1107  %(assert_is_not_reduction)s
1108  ParallelForEachVoxel(%(impargs)s, vf);
1109 }
1110 
1111 // -----------------------------------------------------------------------------
1112 template <%(class_T)s, class VoxelFunc>
1113 void ParallelForEachVoxel(const ImageAttributes &attr, %(imparams_by_pointer)s, VoxelFunc &vf)
1114 {
1115  %(class_name1)s<%(T)s, VoxelFunc> body(%(impargs)s, vf);
1116  blocked_range3d<int> re(0, attr._z, 0, attr._y, 0, attr._x);
1117  if (VoxelFunc::IsReduction()) {
1118  if (attr._dt) {
1119  for (body._l = 0; body._l < attr._t; ++body._l) parallel_reduce(re, body);
1120  } else {
1121  parallel_reduce(re, body);
1122  }
1123  vf.join(body._VoxelFunc);
1124  } else {
1125  if (attr._dt) {
1126  for (body._l = 0; body._l < attr._t; ++body._l) parallel_for(re, body);
1127  } else {
1128  parallel_for(re, body);
1129  }
1130  }
1131 }
1132 
1133 // -----------------------------------------------------------------------------
1134 template <%(class_T)s, class VoxelFunc>
1135 void ParallelForEachVoxel(VoxelFunc vf, const ImageAttributes &attr, %(imparams_by_pointer)s)
1136 {
1137  %(assert_is_not_reduction)s
1138  ParallelForEachVoxel(attr, %(impargs)s, vf);
1139 }
1140 
1141 // -----------------------------------------------------------------------------
1142 template <%(class_T)s, class VoxelFunc>
1143 void ParallelForEachVoxel(const blocked_range<int> &re, %(imparams_by_pointer)s, VoxelFunc &vf)
1144 {
1145  %(class_name1)s<%(T)s, VoxelFunc> body(%(impargs)s, vf);
1146  if (VoxelFunc::IsReduction()) { parallel_reduce(re, body); vf.join(body._VoxelFunc); }
1147  else parallel_for (re, body);
1148 }
1149 
1150 // -----------------------------------------------------------------------------
1151 template <%(class_T)s, class VoxelFunc>
1152 void ParallelForEachVoxel(VoxelFunc vf, const blocked_range<int> &re, %(imparams_by_pointer)s)
1153 {
1154  %(assert_is_not_reduction)s
1155  ParallelForEachVoxel(re, %(impargs)s, vf);
1156 }
1157 
1158 // -----------------------------------------------------------------------------
1159 template <%(class_T)s, class VoxelFunc>
1160 void ParallelForEachVoxel(const blocked_range2d<int> &re, %(imparams_by_pointer)s, VoxelFunc &vf)
1161 {
1162  %(class_name1)s<%(T)s, VoxelFunc> body(%(impargs)s, vf);
1163  if (VoxelFunc::IsReduction()) { parallel_reduce(re, body); vf.join(body._VoxelFunc); }
1164  else parallel_for (re, body);
1165 }
1166 
1167 // -----------------------------------------------------------------------------
1168 template <%(class_T)s, class VoxelFunc>
1169 void ParallelForEachVoxel(VoxelFunc vf, const blocked_range2d<int> &re, %(imparams_by_pointer)s)
1170 {
1171  %(assert_is_not_reduction)s
1172  ParallelForEachVoxel(re, %(impargs)s, vf);
1173 }
1174 
1175 // -----------------------------------------------------------------------------
1176 template <%(class_T)s, class VoxelFunc>
1177 void ParallelForEachVoxel(const blocked_range3d<int> &re, %(imparams_by_pointer)s, VoxelFunc &vf)
1178 {
1179  %(class_name1)s<%(T)s, VoxelFunc> body(%(impargs)s, vf);
1180  if (VoxelFunc::IsReduction()) { parallel_reduce(re, body); vf.join(body._VoxelFunc); }
1181  else parallel_for (re, body);
1182 }
1183 
1184 // -----------------------------------------------------------------------------
1185 template <%(class_T)s, class VoxelFunc>
1186 void ParallelForEachVoxel(VoxelFunc vf, const blocked_range3d<int> &re, %(imparams_by_pointer)s)
1187 {
1188  %(assert_is_not_reduction)s
1189  ParallelForEachVoxel(re, %(impargs)s, vf);
1190 }
1191 
1192 //
1193 // Image arguments by reference
1194 //
1195 
1196 // -----------------------------------------------------------------------------
1197 template <%(class_T)s, class VoxelFunc>
1198 void ParallelForEachScalar(%(imparams_by_reference)s, VoxelFunc &vf)
1199 {
1200  %(class_name1)s<%(T)s, VoxelFunc> body(%(imargs)s, vf);
1201  blocked_range<int> re(0, %(refim)s.GetNumberOfVoxels());
1202  if (VoxelFunc::IsReduction()) { parallel_reduce(re, body); vf.join(body._VoxelFunc); }
1203  else parallel_for (re, body);
1204 }
1205 
1206 // -----------------------------------------------------------------------------
1207 template <%(class_T)s, class VoxelFunc>
1208 void ParallelForEachScalar(VoxelFunc vf, %(imparams_by_reference)s)
1209 {
1210  %(assert_is_not_reduction)s
1211  ParallelForEachScalar(%(imargs)s, vf);
1212 }
1213 
1214 // -----------------------------------------------------------------------------
1215 template <%(class_T)s, class VoxelFunc>
1216 void ParallelForEachVoxel(%(imparams_by_reference)s, VoxelFunc &vf)
1217 {
1218  if (%(refim)s.GetTSize()) {
1219  ParallelForEachScalar(%(imargs)s, vf);
1220  } else {
1221  %(class_name1)s<%(T)s, VoxelFunc> body(%(imargs)s, vf);
1222  blocked_range<int> re(0, %(refim)s.GetNumberOfVoxels() / %(refim)s.GetT());
1223  if (VoxelFunc::IsReduction()) { parallel_reduce(re, body); vf.join(body._VoxelFunc); }
1224  else parallel_for (re, body);
1225  }
1226 }
1227 
1228 // -----------------------------------------------------------------------------
1229 template <%(class_T)s, class VoxelFunc>
1230 void ParallelForEachVoxel(VoxelFunc vf, %(imparams_by_reference)s)
1231 {
1232  %(assert_is_not_reduction)s
1233  ParallelForEachVoxel(%(imargs)s, vf);
1234 }
1235 
1236 // -----------------------------------------------------------------------------
1237 template <%(class_T)s, class VoxelFunc>
1238 void ParallelForEachVoxel(const ImageAttributes &attr, %(imparams_by_reference)s, VoxelFunc &vf)
1239 {
1240  %(class_name1)s<%(T)s, VoxelFunc> body(%(imargs)s, vf);
1241  blocked_range3d<int> re(0, attr._z, 0, attr._y, 0, attr._x);
1242  if (VoxelFunc::IsReduction()) {
1243  if (attr._dt) {
1244  for (body._l = 0; body._l < attr._t; ++body._l) parallel_reduce(re, body);
1245  } else {
1246  parallel_reduce(re, body);
1247  }
1248  vf.join(body._VoxelFunc);
1249  } else {
1250  if (attr._dt) {
1251  for (body._l = 0; body._l < attr._t; ++body._l) parallel_for(re, body);
1252  } else {
1253  parallel_for(re, body);
1254  }
1255  }
1256 }
1257 
1258 // -----------------------------------------------------------------------------
1259 template <%(class_T)s, class VoxelFunc>
1260 void ParallelForEachVoxel(VoxelFunc vf, const ImageAttributes &attr, %(imparams_by_reference)s)
1261 {
1262  %(assert_is_not_reduction)s
1263  ParallelForEachVoxel(attr, %(imargs)s, vf);
1264 }
1265 
1266 // -----------------------------------------------------------------------------
1267 template <%(class_T)s, class VoxelFunc>
1268 void ParallelForEachVoxel(const blocked_range<int> &re, %(imparams_by_reference)s, VoxelFunc &vf)
1269 {
1270  %(class_name1)s<%(T)s, VoxelFunc> body(%(imargs)s, vf);
1271  if (VoxelFunc::IsReduction()) { parallel_reduce(re, body); vf.join(body._VoxelFunc); }
1272  else parallel_for (re, body);
1273 }
1274 
1275 // -----------------------------------------------------------------------------
1276 template <%(class_T)s, class VoxelFunc>
1277 void ParallelForEachVoxel(VoxelFunc vf, const blocked_range<int> &re, %(imparams_by_reference)s)
1278 {
1279  %(assert_is_not_reduction)s
1280  ParallelForEachVoxel(re, %(imargs)s, vf);
1281 }
1282 
1283 // -----------------------------------------------------------------------------
1284 template <%(class_T)s, class VoxelFunc>
1285 void ParallelForEachVoxel(const blocked_range2d<int> &re, %(imparams_by_reference)s, VoxelFunc &vf)
1286 {
1287  %(class_name1)s<%(T)s, VoxelFunc> body(%(imargs)s, vf);
1288  if (VoxelFunc::IsReduction()) { parallel_reduce(re, body); vf.join(body._VoxelFunc); }
1289  else parallel_for (re, body);
1290 }
1291 
1292 // -----------------------------------------------------------------------------
1293 template <%(class_T)s, class VoxelFunc>
1294 void ParallelForEachVoxel(VoxelFunc vf, const blocked_range2d<int> &re, %(imparams_by_reference)s)
1295 {
1296  %(assert_is_not_reduction)s
1297  ParallelForEachVoxel(re, %(imargs)s, vf);
1298 }
1299 
1300 // -----------------------------------------------------------------------------
1301 template <%(class_T)s, class VoxelFunc>
1302 void ParallelForEachVoxel(const blocked_range3d<int> &re, %(imparams_by_reference)s, VoxelFunc &vf)
1303 {
1304  %(class_name1)s<%(T)s, VoxelFunc> body(%(imargs)s, vf);
1305  if (VoxelFunc::IsReduction()) { parallel_reduce(re, body); vf.join(body._VoxelFunc); }
1306  else parallel_for (re, body);
1307 }
1308 
1309 // -----------------------------------------------------------------------------
1310 template <%(class_T)s, class VoxelFunc>
1311 void ParallelForEachVoxel(VoxelFunc vf, const blocked_range3d<int> &re, %(imparams_by_reference)s)
1312 {
1313  %(assert_is_not_reduction)s
1314  ParallelForEachVoxel(re, %(imargs)s, vf);
1315 }
1316 
1317 // -----------------------------------------------------------------------------
1318 // ParallelForEachVoxelIf
1319 // -----------------------------------------------------------------------------
1320 
1321 //
1322 // Image arguments by pointer
1323 //
1324 
1325 // -----------------------------------------------------------------------------
1326 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1327 void ParallelForEachScalarIf(%(imparams_by_pointer)s, VoxelFunc &vf, OutsideFunc &of)
1328 {
1329  %(class_name2)s<%(T)s, VoxelFunc, OutsideFunc, Domain> body(%(impargs)s, vf, of);
1330  blocked_range<int> re(0, %(refim)s->GetNumberOfVoxels());
1331  if (VoxelFunc::IsReduction() || OutsideFunc::IsReduction()) {
1332  parallel_reduce(re, body);
1333  vf.join(body._VoxelFunc);
1334  of.join(body._OutsideFunc);
1335  } else {
1336  parallel_for(re, body);
1337  }
1338 }
1339 
1340 // -----------------------------------------------------------------------------
1341 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1342 void ParallelForEachScalarIf(VoxelFunc vf, OutsideFunc of, %(imparams_by_pointer)s)
1343 {
1344  %(assert_neither_is_not_reduction)s
1345  ParallelForEachScalarIf<Domain>(%(impargs)s, vf, of);
1346 }
1347 
1348 // -----------------------------------------------------------------------------
1349 template <class Domain, %(class_T)s, class VoxelFunc>
1350 void ParallelForEachScalarIf(%(imparams_by_pointer)s, VoxelFunc &vf)
1351 {
1352  NaryVoxelFunction::NOP of;
1353  ParallelForEachScalarIf<Domain>(%(impargs)s, vf, of);
1354 }
1355 
1356 // -----------------------------------------------------------------------------
1357 template <class Domain, %(class_T)s, class VoxelFunc>
1358 void ParallelForEachScalarIf(VoxelFunc vf, %(imparams_by_pointer)s)
1359 {
1360  %(assert_is_not_reduction)s
1361  ParallelForEachScalarIf<Domain>(%(impargs)s, vf);
1362 }
1363 
1364 // -----------------------------------------------------------------------------
1365 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1366 void ParallelForEachVoxelIf(%(imparams_by_pointer)s, VoxelFunc &vf, OutsideFunc &of)
1367 {
1368  if (%(refim)s->GetTSize()) {
1369  ParallelForEachVoxelIf<Domain>(%(impargs)s, vf, of);
1370  } else {
1371  %(class_name2)s<%(T)s, VoxelFunc, OutsideFunc, Domain> body(%(impargs)s, vf, of);
1372  blocked_range<int> re(0, %(refim)s->GetNumberOfVoxels() / %(refim)s->GetT());
1373  if (VoxelFunc::IsReduction() || OutsideFunc::IsReduction()) {
1374  parallel_reduce(re, body);
1375  vf.join(body._VoxelFunc);
1376  of.join(body._OutsideFunc);
1377  } else {
1378  parallel_for(re, body);
1379  }
1380  }
1381 }
1382 
1383 // -----------------------------------------------------------------------------
1384 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1385 void ParallelForEachVoxelIf(VoxelFunc vf, OutsideFunc of, %(imparams_by_pointer)s)
1386 {
1387  %(assert_neither_is_not_reduction)s
1388  ParallelForEachVoxelIf<Domain>(%(impargs)s, vf, of);
1389 }
1390 
1391 // -----------------------------------------------------------------------------
1392 template <class Domain, %(class_T)s, class VoxelFunc>
1393 void ParallelForEachVoxelIf(%(imparams_by_pointer)s, VoxelFunc &vf)
1394 {
1395  NaryVoxelFunction::NOP of;
1396  ParallelForEachVoxelIf<Domain>(%(impargs)s, vf, of);
1397 }
1398 
1399 // -----------------------------------------------------------------------------
1400 template <class Domain, %(class_T)s, class VoxelFunc>
1401 void ParallelForEachVoxelIf(VoxelFunc vf, %(imparams_by_pointer)s)
1402 {
1403  %(assert_is_not_reduction)s
1404  ParallelForEachVoxelIf<Domain>(%(impargs)s, vf);
1405 }
1406 
1407 // -----------------------------------------------------------------------------
1408 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1409 void ParallelForEachVoxelIf(const ImageAttributes &attr, %(imparams_by_pointer)s, VoxelFunc &vf, OutsideFunc &of)
1410 {
1411  %(class_name2)s<%(T)s, VoxelFunc, OutsideFunc, Domain> body(%(impargs)s, vf, of);
1412  blocked_range3d<int> re(0, attr._z, 0, attr._y, 0, attr._x);
1413  if (VoxelFunc::IsReduction() || OutsideFunc::IsReduction()) {
1414  if (attr._dt) {
1415  for (body._l = 0; body._l < attr._t; ++body._l) parallel_reduce(re, body);
1416  } else {
1417  parallel_reduce(re, body);
1418  }
1419  vf.join(body._VoxelFunc);
1420  of.join(body._OutsideFunc);
1421  } else {
1422  if (attr._dt) {
1423  for (body._l = 0; body._l < attr._t; ++body._l) parallel_for(re, body);
1424  } else {
1425  parallel_for(re, body);
1426  }
1427  }
1428 }
1429 
1430 // -----------------------------------------------------------------------------
1431 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1432 void ParallelForEachVoxelIf(VoxelFunc vf, OutsideFunc of, const ImageAttributes &attr, %(imparams_by_pointer)s)
1433 {
1434  %(assert_neither_is_not_reduction)s
1435  ParallelForEachVoxelIf<Domain>(attr, %(impargs)s, vf, of);
1436 }
1437 
1438 // -----------------------------------------------------------------------------
1439 template <class Domain, %(class_T)s, class VoxelFunc>
1440 void ParallelForEachVoxelIf(const ImageAttributes &attr, %(imparams_by_pointer)s, VoxelFunc &vf)
1441 {
1442  NaryVoxelFunction::NOP of;
1443  ParallelForEachVoxelIf<Domain>(attr, %(impargs)s, vf, of);
1444 }
1445 
1446 // -----------------------------------------------------------------------------
1447 template <class Domain, %(class_T)s, class VoxelFunc>
1448 void ParallelForEachVoxelIf(VoxelFunc vf, const ImageAttributes &attr, %(imparams_by_pointer)s)
1449 {
1450  %(assert_is_not_reduction)s
1451  ParallelForEachVoxelIf<Domain>(attr, %(impargs)s, vf);
1452 }
1453 
1454 // -----------------------------------------------------------------------------
1455 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1456 void ParallelForEachVoxelIf(const blocked_range<int> &re, %(imparams_by_pointer)s, VoxelFunc &vf, OutsideFunc &of)
1457 {
1458  %(class_name2)s<%(T)s, VoxelFunc, OutsideFunc, Domain> body(%(impargs)s, vf, of);
1459  if (VoxelFunc::IsReduction() || OutsideFunc::IsReduction()) {
1460  parallel_reduce(re, body);
1461  vf.join(body._VoxelFunc);
1462  of.join(body._OutsideFunc);
1463  } else {
1464  parallel_for(re, body);
1465  }
1466 }
1467 
1468 // -----------------------------------------------------------------------------
1469 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1470 void ParallelForEachVoxelIf(VoxelFunc vf, OutsideFunc of, const blocked_range<int> &re, %(imparams_by_pointer)s)
1471 {
1472  %(assert_neither_is_not_reduction)s
1473  ParallelForEachVoxelIf<Domain>(re, %(impargs)s, vf, of);
1474 }
1475 
1476 // -----------------------------------------------------------------------------
1477 template <class Domain, %(class_T)s, class VoxelFunc>
1478 void ParallelForEachVoxelIf(const blocked_range<int> &re, %(imparams_by_pointer)s, VoxelFunc &vf)
1479 {
1480  NaryVoxelFunction::NOP of;
1481  ParallelForEachVoxelIf<Domain>(re, %(impargs)s, vf, of);
1482 }
1483 
1484 // -----------------------------------------------------------------------------
1485 template <class Domain, %(class_T)s, class VoxelFunc>
1486 void ParallelForEachVoxelIf(VoxelFunc vf, const blocked_range<int> &re, %(imparams_by_pointer)s)
1487 {
1488  %(assert_is_not_reduction)s
1489  ParallelForEachVoxelIf<Domain>(re, %(impargs)s, vf);
1490 }
1491 
1492 // -----------------------------------------------------------------------------
1493 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1494 void ParallelForEachVoxelIf(const blocked_range2d<int> &re, %(imparams_by_pointer)s, VoxelFunc &vf, OutsideFunc &of)
1495 {
1496  %(class_name2)s<%(T)s, VoxelFunc, OutsideFunc, Domain> body(%(impargs)s, vf, of);
1497  if (VoxelFunc::IsReduction() || OutsideFunc::IsReduction()) {
1498  parallel_reduce(re, body);
1499  vf.join(body._VoxelFunc);
1500  of.join(body._OutsideFunc);
1501  } else {
1502  parallel_for(re, body);
1503  }
1504 }
1505 
1506 // -----------------------------------------------------------------------------
1507 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1508 void ParallelForEachVoxelIf(VoxelFunc vf, OutsideFunc of, const blocked_range2d<int> &re, %(imparams_by_pointer)s)
1509 {
1510  %(assert_neither_is_not_reduction)s
1511  ParallelForEachVoxelIf<Domain>(re, %(impargs)s, vf, of);
1512 }
1513 
1514 // -----------------------------------------------------------------------------
1515 template <class Domain, %(class_T)s, class VoxelFunc>
1516 void ParallelForEachVoxelIf(const blocked_range2d<int> &re, %(imparams_by_pointer)s, VoxelFunc &vf)
1517 {
1518  NaryVoxelFunction::NOP of;
1519  ParallelForEachVoxelIf<Domain>(re, %(impargs)s, vf, of);
1520 }
1521 
1522 // -----------------------------------------------------------------------------
1523 template <class Domain, %(class_T)s, class VoxelFunc>
1524 void ParallelForEachVoxelIf(VoxelFunc vf, const blocked_range2d<int> &re, %(imparams_by_pointer)s)
1525 {
1526  %(assert_is_not_reduction)s
1527  ParallelForEachVoxelIf<Domain>(re, %(impargs)s, vf);
1528 }
1529 
1530 // -----------------------------------------------------------------------------
1531 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1532 void ParallelForEachVoxelIf(const blocked_range3d<int> &re, %(imparams_by_pointer)s, VoxelFunc &vf, OutsideFunc &of)
1533 {
1534  %(class_name2)s<%(T)s, VoxelFunc, OutsideFunc, Domain> body(%(impargs)s, vf, of);
1535  if (VoxelFunc::IsReduction() || OutsideFunc::IsReduction()) {
1536  parallel_reduce(re, body);
1537  vf.join(body._VoxelFunc);
1538  of.join(body._OutsideFunc);
1539  } else {
1540  parallel_for(re, body);
1541  }
1542 }
1543 
1544 // -----------------------------------------------------------------------------
1545 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1546 void ParallelForEachVoxelIf(VoxelFunc vf, OutsideFunc of, const blocked_range3d<int> &re, %(imparams_by_pointer)s)
1547 {
1548  %(assert_neither_is_not_reduction)s
1549  ParallelForEachVoxelIf<Domain>(re, %(impargs)s, vf, of);
1550 }
1551 
1552 // -----------------------------------------------------------------------------
1553 template <class Domain, %(class_T)s, class VoxelFunc>
1554 void ParallelForEachVoxelIf(const blocked_range3d<int> &re, %(imparams_by_pointer)s, VoxelFunc &vf)
1555 {
1556  NaryVoxelFunction::NOP of;
1557  ParallelForEachVoxelIf<Domain>(re, %(impargs)s, vf, of);
1558 }
1559 
1560 // -----------------------------------------------------------------------------
1561 template <class Domain, %(class_T)s, class VoxelFunc>
1562 void ParallelForEachVoxelIf(VoxelFunc vf, const blocked_range3d<int> &re, %(imparams_by_pointer)s)
1563 {
1564  %(assert_is_not_reduction)s
1565  ParallelForEachVoxelIf<Domain>(re, %(impargs)s, vf);
1566 }
1567 
1568 //
1569 // Image arguments by reference
1570 //
1571 
1572 // -----------------------------------------------------------------------------
1573 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1574 void ParallelForEachScalarIf(%(imparams_by_reference)s, VoxelFunc &vf, OutsideFunc &of)
1575 {
1576  %(class_name2)s<%(T)s, VoxelFunc, OutsideFunc, Domain> body(%(imargs)s, vf, of);
1577  blocked_range<int> re(0, %(refim)s.GetNumberOfVoxels());
1578  if (VoxelFunc::IsReduction() || OutsideFunc::IsReduction()) {
1579  parallel_reduce(re, body);
1580  vf.join(body._VoxelFunc);
1581  of.join(body._OutsideFunc);
1582  } else {
1583  parallel_for(re, body);
1584  }
1585 }
1586 
1587 // -----------------------------------------------------------------------------
1588 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1589 void ParallelForEachScalarIf(VoxelFunc vf, OutsideFunc of, %(imparams_by_reference)s)
1590 {
1591  %(assert_neither_is_not_reduction)s
1592  ParallelForEachScalarIf<Domain>(%(imargs)s, vf, of);
1593 }
1594 
1595 // -----------------------------------------------------------------------------
1596 template <class Domain, %(class_T)s, class VoxelFunc>
1597 void ParallelForEachScalarIf(%(imparams_by_reference)s, VoxelFunc &vf)
1598 {
1599  NaryVoxelFunction::NOP of;
1600  ParallelForEachScalarIf<Domain>(%(imargs)s, vf, of);
1601 }
1602 
1603 // -----------------------------------------------------------------------------
1604 template <class Domain, %(class_T)s, class VoxelFunc>
1605 void ParallelForEachScalarIf(VoxelFunc vf, %(imparams_by_reference)s)
1606 {
1607  %(assert_is_not_reduction)s
1608  ParallelForEachScalarIf<Domain>(%(imargs)s, vf);
1609 }
1610 
1611 // -----------------------------------------------------------------------------
1612 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1613 void ParallelForEachVoxelIf(%(imparams_by_reference)s, VoxelFunc &vf, OutsideFunc &of)
1614 {
1615  if (%(refim)s.GetTSize()) {
1616  ParallelForEachVoxelIf<Domain>(%(imargs)s, vf, of);
1617  } else {
1618  %(class_name2)s<%(T)s, VoxelFunc, OutsideFunc, Domain> body(%(imargs)s, vf, of);
1619  blocked_range<int> re(0, %(refim)s.GetNumberOfVoxels() / %(refim)s.GetT());
1620  if (VoxelFunc::IsReduction() || OutsideFunc::IsReduction()) {
1621  parallel_reduce(re, body);
1622  vf.join(body._VoxelFunc);
1623  of.join(body._OutsideFunc);
1624  } else {
1625  parallel_for(re, body);
1626  }
1627  }
1628 }
1629 
1630 // -----------------------------------------------------------------------------
1631 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1632 void ParallelForEachVoxelIf(VoxelFunc vf, OutsideFunc of, %(imparams_by_reference)s)
1633 {
1634  %(assert_neither_is_not_reduction)s
1635  ParallelForEachVoxelIf<Domain>(%(imargs)s, vf, of);
1636 }
1637 
1638 // -----------------------------------------------------------------------------
1639 template <class Domain, %(class_T)s, class VoxelFunc>
1640 void ParallelForEachVoxelIf(%(imparams_by_reference)s, VoxelFunc &vf)
1641 {
1642  NaryVoxelFunction::NOP of;
1643  ParallelForEachVoxelIf<Domain>(%(imargs)s, vf, of);
1644 }
1645 
1646 // -----------------------------------------------------------------------------
1647 template <class Domain, %(class_T)s, class VoxelFunc>
1648 void ParallelForEachVoxelIf(VoxelFunc vf, %(imparams_by_reference)s)
1649 {
1650  %(assert_is_not_reduction)s
1651  ParallelForEachVoxelIf<Domain>(%(imargs)s, vf);
1652 }
1653 
1654 // -----------------------------------------------------------------------------
1655 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1656 void ParallelForEachVoxelIf(const ImageAttributes &attr, %(imparams_by_reference)s, VoxelFunc &vf, OutsideFunc &of)
1657 {
1658  %(class_name2)s<%(T)s, VoxelFunc, OutsideFunc, Domain> body(%(imargs)s, vf, of);
1659  blocked_range3d<int> re(0, attr._z, 0, attr._y, 0, attr._x);
1660  if (VoxelFunc::IsReduction() || OutsideFunc::IsReduction()) {
1661  if (attr._dt) {
1662  for (body._l = 0; body._l < attr._t; ++body._l) parallel_reduce(re, body);
1663  } else {
1664  parallel_reduce(re, body);
1665  }
1666  vf.join(body._VoxelFunc);
1667  of.join(body._OutsideFunc);
1668  } else {
1669  if (attr._dt) {
1670  for (body._l = 0; body._l < attr._t; ++body._l) parallel_for(re, body);
1671  } else {
1672  parallel_for(re, body);
1673  }
1674  }
1675 }
1676 
1677 // -----------------------------------------------------------------------------
1678 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1679 void ParallelForEachVoxelIf(VoxelFunc vf, OutsideFunc of, const ImageAttributes &attr, %(imparams_by_reference)s)
1680 {
1681  %(assert_neither_is_not_reduction)s
1682  ParallelForEachVoxelIf<Domain>(attr, %(imargs)s, vf, of);
1683 }
1684 
1685 // -----------------------------------------------------------------------------
1686 template <class Domain, %(class_T)s, class VoxelFunc>
1687 void ParallelForEachVoxelIf(const ImageAttributes &attr, %(imparams_by_reference)s, VoxelFunc &vf)
1688 {
1689  NaryVoxelFunction::NOP of;
1690  ParallelForEachVoxelIf<Domain>(attr, %(imargs)s, vf, of);
1691 }
1692 
1693 // -----------------------------------------------------------------------------
1694 template <class Domain, %(class_T)s, class VoxelFunc>
1695 void ParallelForEachVoxelIf(VoxelFunc vf, const ImageAttributes &attr, %(imparams_by_reference)s)
1696 {
1697  %(assert_is_not_reduction)s
1698  ParallelForEachVoxelIf<Domain>(attr, %(imargs)s, vf);
1699 }
1700 
1701 // -----------------------------------------------------------------------------
1702 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1703 void ParallelForEachVoxelIf(const blocked_range<int> &re, %(imparams_by_reference)s, VoxelFunc &vf, OutsideFunc &of)
1704 {
1705  %(class_name2)s<%(T)s, VoxelFunc, OutsideFunc, Domain> body(%(imargs)s, vf, of);
1706  if (VoxelFunc::IsReduction() || OutsideFunc::IsReduction()) {
1707  parallel_reduce(re, body);
1708  vf.join(body._VoxelFunc);
1709  of.join(body._OutsideFunc);
1710  } else {
1711  parallel_for(re, body);
1712  }
1713 }
1714 
1715 // -----------------------------------------------------------------------------
1716 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1717 void ParallelForEachVoxelIf(VoxelFunc vf, OutsideFunc of, const blocked_range<int> &re, %(imparams_by_reference)s)
1718 {
1719  %(assert_neither_is_not_reduction)s
1720  ParallelForEachVoxelIf<Domain>(re, %(imargs)s, vf, of);
1721 }
1722 
1723 // -----------------------------------------------------------------------------
1724 template <class Domain, %(class_T)s, class VoxelFunc>
1725 void ParallelForEachVoxelIf(const blocked_range<int> &re, %(imparams_by_reference)s, VoxelFunc &vf)
1726 {
1727  NaryVoxelFunction::NOP of;
1728  ParallelForEachVoxelIf<Domain>(re, %(imargs)s, vf, of);
1729 }
1730 
1731 // -----------------------------------------------------------------------------
1732 template <class Domain, %(class_T)s, class VoxelFunc>
1733 void ParallelForEachVoxelIf(VoxelFunc vf, const blocked_range<int> &re, %(imparams_by_reference)s)
1734 {
1735  %(assert_is_not_reduction)s
1736  ParallelForEachVoxelIf<Domain>(re, %(imargs)s, vf);
1737 }
1738 
1739 // -----------------------------------------------------------------------------
1740 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1741 void ParallelForEachVoxelIf(const blocked_range2d<int> &re, %(imparams_by_reference)s, VoxelFunc &vf, OutsideFunc &of)
1742 {
1743  %(class_name2)s<%(T)s, VoxelFunc, OutsideFunc, Domain> body(%(imargs)s, vf, of);
1744  if (VoxelFunc::IsReduction() || OutsideFunc::IsReduction()) {
1745  parallel_reduce(re, body);
1746  vf.join(body._VoxelFunc);
1747  of.join(body._OutsideFunc);
1748  } else {
1749  parallel_for(re, body);
1750  }
1751 }
1752 
1753 // -----------------------------------------------------------------------------
1754 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1755 void ParallelForEachVoxelIf(VoxelFunc vf, OutsideFunc of, const blocked_range2d<int> &re, %(imparams_by_reference)s)
1756 {
1757  %(assert_neither_is_not_reduction)s
1758  ParallelForEachVoxelIf<Domain>(re, %(imargs)s, vf, of);
1759 }
1760 
1761 // -----------------------------------------------------------------------------
1762 template <class Domain, %(class_T)s, class VoxelFunc>
1763 void ParallelForEachVoxelIf(const blocked_range2d<int> &re, %(imparams_by_reference)s, VoxelFunc &vf)
1764 {
1765  NaryVoxelFunction::NOP of;
1766  ParallelForEachVoxelIf<Domain>(re, %(imargs)s, vf, of);
1767 }
1768 
1769 // -----------------------------------------------------------------------------
1770 template <class Domain, %(class_T)s, class VoxelFunc>
1771 void ParallelForEachVoxelIf(VoxelFunc vf, const blocked_range2d<int> &re, %(imparams_by_reference)s)
1772 {
1773  %(assert_is_not_reduction)s
1774  ParallelForEachVoxelIf<Domain>(re, %(imargs)s, vf);
1775 }
1776 
1777 // -----------------------------------------------------------------------------
1778 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1779 void ParallelForEachVoxelIf(const blocked_range3d<int> &re, %(imparams_by_reference)s, VoxelFunc &vf, OutsideFunc &of)
1780 {
1781  %(class_name2)s<%(T)s, VoxelFunc, OutsideFunc, Domain> body(%(imargs)s, vf, of);
1782  if (VoxelFunc::IsReduction() || OutsideFunc::IsReduction()) {
1783  parallel_reduce(re, body);
1784  vf.join(body._VoxelFunc);
1785  of.join(body._OutsideFunc);
1786  } else {
1787  parallel_for(re, body);
1788  }
1789 }
1790 
1791 // -----------------------------------------------------------------------------
1792 template <class Domain, %(class_T)s, class VoxelFunc, class OutsideFunc>
1793 void ParallelForEachVoxelIf(VoxelFunc vf, OutsideFunc of, const blocked_range3d<int> &re, %(imparams_by_reference)s)
1794 {
1795  %(assert_neither_is_not_reduction)s
1796  ParallelForEachVoxelIf<Domain>(re, %(imargs)s, vf, of);
1797 }
1798 
1799 // -----------------------------------------------------------------------------
1800 template <class Domain, %(class_T)s, class VoxelFunc>
1801 void ParallelForEachVoxelIf(const blocked_range3d<int> &re, %(imparams_by_reference)s, VoxelFunc &vf)
1802 {
1803  NaryVoxelFunction::NOP of;
1804  ParallelForEachVoxelIf<Domain>(re, %(imargs)s, vf, of);
1805 }
1806 
1807 // -----------------------------------------------------------------------------
1808 template <class Domain, %(class_T)s, class VoxelFunc>
1809 void ParallelForEachVoxelIf(VoxelFunc vf, const blocked_range3d<int> &re, %(imparams_by_reference)s)
1810 {
1811  %(assert_is_not_reduction)s
1812  ParallelForEachVoxelIf<Domain>(re, %(imargs)s, vf);
1813 }
1814 """ % cfg)
1815 
1816 # ------------------------------------------------------------------------------
1817 # main
1818 f.write("""
1819 inline void _foreach%svoxelfunction_must_not_be_reduction()
1820 {
1821  cerr << "(Parallel)ForEachVoxel(If): Voxel reductions must be passed by reference!"
1822  " Pass voxel functor object(s) as last argument(s) instead of first." << endl;
1823  exit(1);
1824 }
1825 
1826 """ % arity_to_string(arity))
1827 
# Generate one specialization set per number of const image arguments,
# counting down from all-const (num_const == arity) to none (num_const == 0).
for num_const in range(arity, -1, -1):
 gencode(arity, num_const)
1832 
1833 # ------------------------------------------------------------------------------
1834 # footer - end of namespace and include guard
1835 f.write("""
1836 
1837 } // namespace mirtk
1838 
1839 #endif
1840 """)
1841 f.close()