/*
 * parallel.c - This file contains all of the code for doing parallel
 * message passing and such.
 *
 * (C) Copyright 1994-2022 John E. Stone
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * $Id: parallel.c,v 1.63 2022/02/18 17:55:28 johns Exp $
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

#define TACHYON_INTERNAL 1
#include "tachyon.h"
#include "macros.h"
#include "parallel.h"
#include "tgafile.h"
#include "util.h"
#include "threads.h"

#if !defined(_MSC_VER)
#include <unistd.h>
#endif

#ifdef MPI
#include <mpi.h>

typedef struct {
  int totalrows;
  int count;
  int curmsg;
  int haveinited;
  int havestarted;
  MPI_Request * requests;
  MPI_Status * statuses;
  int * indices;
} pardata;

/*
 * Check to see if we have to pass the MPI_IN_PLACE flag
 * for our allgather reductions during startup.
 */
#if !defined(USE_MPI_IN_PLACE)
#if (MPI_VERSION >= 2) || defined(MPI_IN_PLACE)
#define USE_MPI_IN_PLACE 1
#endif
#endif

#endif /* MPI */

typedef struct {
  int mpienabled;   /* Whether MPI is enabled or not at runtime */
  int mpi_client;   /* Whether Tachyon is a client of a calling MPI code, */
                    /* rather than having initialized MPI for itself */
  int owns_comm;    /* Whether we created the communicator or not */
#ifdef MPI
  MPI_Comm comm;    /* Our MPI communicator */
  int color;        /* Param given to MPI_Comm_split() */
  int key;          /* Param given to MPI_Comm_split() */
#endif
  int worldrank;    /* our rank within MPI_COMM_WORLD */
  int worldsize;    /* size of MPI_COMM_WORLD */
  int callrank;     /* our rank within the calling code's communicator */
  int callsize;     /* size of the calling communicator */
  int commrank;     /* our rank within our own sub-communicator */
  int commsize;     /* size of our own sub-communicator */
} parhandle;

static void rt_par_comm_default(parhandle *ph) {
  if (ph != NULL) {
    ph->mpienabled=0;
#ifdef MPI
    ph->mpienabled=1;
    ph->comm = MPI_COMM_WORLD; /* Use global communicator by default */
#endif
    ph->mpi_client = 0;  /* Tachyon initialized MPI for itself */
    ph->owns_comm = 1;   /* we own the communicator we're using */
    ph->worldrank = 0;   /* we are rank 0 unless MPI is used */
    ph->worldsize = 1;   /* group has size 1 unless MPI is used */
    ph->callrank = 0;    /* we are rank 0 unless MPI is used */
    ph->callsize = 1;    /* group has size 1 unless MPI is used */
    ph->commrank = 0;    /* we are rank 0 unless MPI is used */
    ph->commsize = 1;    /* group has size 1 unless MPI is used */
  }
}

#ifdef MPI
static void rt_par_comm_info(parhandle *ph, MPI_Comm *caller_comm) {
  if (ph != NULL) {
    /* record this node's rank among various communicators */
    MPI_Comm_rank(MPI_COMM_WORLD, &ph->worldrank);
    MPI_Comm_size(MPI_COMM_WORLD, &ph->worldsize);
    MPI_Comm_rank(*caller_comm, &ph->callrank);
    MPI_Comm_size(*caller_comm, &ph->callsize);
    MPI_Comm_rank(ph->comm, &ph->commrank);
    MPI_Comm_size(ph->comm, &ph->commsize);
  }
}
#endif


rt_parhandle rt_par_init_nompi(void) {
  parhandle *ph = (parhandle *) calloc(1, sizeof(parhandle));
  rt_par_comm_default(ph); /* Reset to known default state */
  ph->mpienabled=0;        /* disable MPI for this run */
  return ph;
}


rt_parhandle rt_par_init(int * argc, char ***argv) {
  parhandle *ph = (parhandle *) calloc(1, sizeof(parhandle));
  rt_par_comm_default(ph);     /* Reset to known default state */

#ifdef MPI
  MPI_Init(argc, argv);
  rt_par_comm_default(ph);     /* Reset to known default state */
  ph->mpi_client = 0;          /* Tachyon initialized MPI itself */
  ph->owns_comm = 0;           /* We're using MPI_COMM_WORLD, don't delete it */
  ph->comm = MPI_COMM_WORLD;   /* Use global communicator by default */
  rt_par_comm_info(ph, &ph->comm);
#endif

  return ph;
}


rt_parhandle rt_par_init_mpi_comm(void * mpicomm) {
#ifdef MPI
  MPI_Comm *caller_comm = (MPI_Comm *) mpicomm;
  if (caller_comm != NULL) {
    parhandle *ph=(parhandle *) calloc(1, sizeof(parhandle));
    rt_par_comm_default(ph);   /* Reset to known default state */
    ph->mpi_client = 1;        /* Tachyon is a client of a calling MPI code */
    ph->owns_comm = 0;         /* Caller created the communicator we're using */
    ph->comm = *caller_comm;   /* Use caller-provided communicator */
    rt_par_comm_info(ph, &ph->comm);

    return ph;
  }
#endif

  return NULL; /* not supported for non-MPI builds */
}


rt_parhandle rt_par_init_mpi_comm_world(void) {
#ifdef MPI
  MPI_Comm comm = MPI_COMM_WORLD;
  return rt_par_init_mpi_comm(&comm);
#endif
  return NULL; /* not supported for non-MPI builds */
}

rt_parhandle rt_par_init_mpi_comm_split(void * mpicomm, int color, int key) {
#ifdef MPI
  MPI_Comm *caller_comm = (MPI_Comm *) mpicomm;
  if (caller_comm != NULL) {
    parhandle *ph=(parhandle *) calloc(1, sizeof(parhandle));
    rt_par_comm_default(ph);   /* Reset to known default state */
    ph->mpi_client = 1;        /* Tachyon is a client of a calling MPI code */
    ph->owns_comm = 1;         /* Tachyon created the communicator it's using */
    MPI_Comm_split(*caller_comm, color, key, &ph->comm);
    rt_par_comm_info(ph, caller_comm);

    return ph;
  }
#endif

  return NULL; /* not supported for non-MPI builds */
}

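/*
 * Illustrative usage sketch (kept out of the build with "#if 0", not part
 * of the library): how a calling MPI application that already owns MPI
 * might hand Tachyon an existing communicator via rt_par_init_mpi_comm().
 * The example_client_init() wrapper and the app_comm variable are
 * hypothetical names invented for this sketch; only the rt_par_*() and
 * MPI_*() calls are real APIs.
 */
#if 0
#include <mpi.h>
#include "tachyon.h"

int example_client_init(void) {
  MPI_Comm app_comm;      /* hypothetical communicator owned by the caller */
  rt_parhandle ph;

  MPI_Init(NULL, NULL);   /* the caller, not Tachyon, initializes MPI */
  MPI_Comm_dup(MPI_COMM_WORLD, &app_comm);

  ph = rt_par_init_mpi_comm(&app_comm); /* Tachyon becomes an MPI client */
  if (ph == NULL)
    return -1;            /* non-MPI build, or a bad communicator */

  /* ... build the scene and render frames here ... */

  rt_par_finish(ph);      /* frees the handle; leaves the caller's MPI alone */
  MPI_Comm_free(&app_comm);
  MPI_Finalize();         /* the caller shuts MPI down itself */
  return 0;
}
#endif
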
int rt_par_set_mpi_comm(rt_parhandle voidhandle, void * mpicomm) {
#ifdef MPI
  parhandle *ph=(parhandle *) voidhandle;
  if (ph->mpienabled) {
    MPI_Comm *caller_comm = (MPI_Comm *) mpicomm;
    if (caller_comm != NULL) {
      /* If Tachyon is a client library within a larger app */
      /* then we clean up any sub-communicators we may have created. */
      if (ph->mpi_client && ph->owns_comm) {
        MPI_Comm_free(&ph->comm);
      }

      rt_par_comm_default(ph);   /* Reset to known default state */
      ph->mpi_client = 1;        /* Tachyon is a client of a calling MPI code */
      ph->owns_comm = 0;         /* Caller created the communicator we're using */
      ph->comm = *caller_comm;   /* Use caller-provided communicator */
      rt_par_comm_info(ph, &ph->comm);

      return 0;
    }
  }
#endif

  return -1; /* not supported for non-MPI builds */
}


int rt_par_set_mpi_comm_world(rt_parhandle voidhandle) {
#ifdef MPI
  parhandle *ph=(parhandle *) voidhandle;
  if (ph->mpienabled) {
    MPI_Comm comm = MPI_COMM_WORLD;
    return rt_par_set_mpi_comm(voidhandle, &comm);
  }
#endif

  return -1; /* not supported for non-MPI builds */
}

int rt_par_set_mpi_comm_split(rt_parhandle voidhandle, void * mpicomm,
                              int color, int key) {
#ifdef MPI
  parhandle *ph=(parhandle *) voidhandle;
  if (ph->mpienabled) {
    MPI_Comm *caller_comm = (MPI_Comm *) mpicomm;
    if (caller_comm != NULL) {
      /* If Tachyon is a client library within a larger app */
      /* then we clean up any sub-communicators we may have created. */
      if (ph->mpi_client && ph->owns_comm) {
        MPI_Comm_free(&ph->comm);
      }

      rt_par_comm_default(ph);   /* Reset to known default state */
      ph->mpi_client = 1;        /* Tachyon is a client of a calling MPI code */
      ph->owns_comm = 1;         /* Tachyon created the communicator it's using */
      MPI_Comm_split(*caller_comm, color, key, &ph->comm);
      rt_par_comm_info(ph, caller_comm);

      return 0;
    }
  }
#endif

  return -1; /* not supported for non-MPI builds */
}


int rt_par_set_mpi_comm_world_split(rt_parhandle voidhandle,
                                    int color, int key) {
#ifdef MPI
  parhandle *ph=(parhandle *) voidhandle;
  if (ph->mpienabled) {
    MPI_Comm comm = MPI_COMM_WORLD;
    return rt_par_set_mpi_comm_split(voidhandle, &comm, color, key);
  }
#endif

  return -1; /* not supported for non-MPI builds */
}


int rt_par_set_mpi_comm_world_split_all(rt_parhandle voidhandle) {
#ifdef MPI
  parhandle *ph=(parhandle *) voidhandle;
  if (ph->mpienabled) {
    int myrank;
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Comm_rank(comm, &myrank);
    return rt_par_set_mpi_comm_split(voidhandle, &comm, myrank, 0);
  }
#endif

  return -1; /* not supported for non-MPI builds */
}

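/*
 * Illustrative usage sketch (kept out of the build with "#if 0"):
 * rt_par_set_mpi_comm_world_split_all() above places every MPI rank in
 * its own single-member communicator, so each rank renders a complete,
 * independent image.  The example_render_independent_frames() wrapper,
 * the wrank variable, and the filename scheme are assumptions invented
 * for this sketch.
 */
#if 0
#include <stdio.h>
#include <mpi.h>
#include "tachyon.h"

void example_render_independent_frames(rt_parhandle ph) {
  int wrank;
  char outfile[256];

  MPI_Comm_rank(MPI_COMM_WORLD, &wrank); /* remember the global rank first */

  if (rt_par_set_mpi_comm_world_split_all(ph) == 0) {
    /* after the split each rank sees rt_par_size(ph) == 1 and */
    /* rt_par_rank(ph) == 0, so it renders and writes its own frame */
    sprintf(outfile, "frame.%04d.tga", wrank);
    /* ... set up the scene, render, and write outfile here ... */
  }
}
#endif
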
int rt_par_finish(rt_parhandle voidhandle) {
  parhandle *ph = (parhandle *) voidhandle;
  int a=0; /* if sequential, do nothing */

  if (ph == NULL)
    return -1;

#ifdef MPI
  if (ph->mpienabled) {
    /* If Tachyon is a client library within a larger app, */
    /* then we only clean up any sub-communicators we may */
    /* have created and free up data.  If Tachyon is not */
    /* a client, but is in charge of MPI, then we have to */
    /* tear down everything at this point. */
    if (ph->mpi_client) {
      if (ph->owns_comm) {
        MPI_Comm_free(&ph->comm);
      }
    } else {
      free(ph); /* free the handle before calling MPI_Finalize() */
      ph=NULL;

      /* If Tachyon initialized MPI for itself, */
      /* then it must also shut down MPI itself. */
      MPI_Finalize();
    }
  }
#endif

  if (ph != NULL)
    free(ph);

  return a;
}

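/*
 * Illustrative usage sketch (kept out of the build with "#if 0"): the
 * standalone lifecycle in which Tachyon initializes and finalizes MPI
 * itself via rt_par_init()/rt_par_finish().  The main() shown here is a
 * hypothetical host program; scene setup and rendering are elided, and
 * the sketch relies on the headers already included at the top of this
 * file.
 */
#if 0
#include <stdio.h>
#include "tachyon.h"

int main(int argc, char **argv) {
  rt_parhandle ph = rt_par_init(&argc, &argv); /* calls MPI_Init() in MPI builds */

  printf("node %d of %d ready\n", rt_par_rank(ph), rt_par_size(ph));

  /* ... parse the scene and render frames here ... */

  rt_par_barrier_sync(ph); /* wait until every rank has finished */
  rt_par_finish(ph);       /* calls MPI_Finalize() since Tachyon owns MPI here */
  return 0;
}
#endif
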
int rt_par_rank(rt_parhandle voidhandle) {
  parhandle *ph = (parhandle *) voidhandle;
  return ph->commrank;
}

int rt_par_size(rt_parhandle voidhandle) {
  parhandle *ph = (parhandle *) voidhandle;
  return ph->commsize;
}

void rt_par_barrier_sync(rt_parhandle voidhandle) {
  /* if sequential, do nothing */
#ifdef MPI
  parhandle *ph = (parhandle *) voidhandle;
  if (ph->mpienabled)
    MPI_Barrier(ph->comm);
#endif
}

int rt_par_getcpuinfo(rt_parhandle voidhandle, nodeinfo **nodes) {
  parhandle *ph = (parhandle *) voidhandle;
  int numnodes = ph->commsize;
  int mynode = ph->commrank;
#ifdef MPI
  int namelen;
  char namebuf[MPI_MAX_PROCESSOR_NAME];
#endif

  *nodes = (nodeinfo *) malloc(numnodes * sizeof(nodeinfo));
  (*nodes)[mynode].numcpus = rt_thread_numprocessors();
  (*nodes)[mynode].cpuspeed = 1.0;
  (*nodes)[mynode].nodespeed = (*nodes)[mynode].numcpus *
                               (*nodes)[mynode].cpuspeed;
  (*nodes)[mynode].cpucaps = NULL;

#ifdef MPI
  if (ph->mpienabled) {
    MPI_Get_processor_name((char *) &namebuf, &namelen);
    strncpy((char *) &(*nodes)[mynode].machname, namebuf,
            (((namelen + 1) < 511) ? (namelen+1) : 511));
#if defined(USE_MPI_IN_PLACE)
    MPI_Allgather(MPI_IN_PLACE, sizeof(nodeinfo), MPI_BYTE,
                  &(*nodes)[0], sizeof(nodeinfo), MPI_BYTE,
                  ph->comm);
#else
    MPI_Allgather(&(*nodes)[mynode], sizeof(nodeinfo), MPI_BYTE,
                  &(*nodes)[0], sizeof(nodeinfo), MPI_BYTE,
                  ph->comm);
#endif
  } else
#endif
  {
#if defined(_MSC_VER)
    strcpy((*nodes)[mynode].machname, "Windows");
#elif defined(MCOS)
    strcpy((*nodes)[mynode].machname, "Mercury");
#else
    gethostname((*nodes)[mynode].machname, 511);
#endif
    (*nodes)[mynode].cpucaps = calloc(1, sizeof(rt_cpu_caps_t));
    if (rt_cpu_capability_flags((rt_cpu_caps_t *) (*nodes)[mynode].cpucaps) != 0) {
      free((*nodes)[mynode].cpucaps);
      (*nodes)[mynode].cpucaps = NULL; /* avoid a dangling pointer on failure */
    }
  }

  return numnodes;
}

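/*
 * Illustrative usage sketch (kept out of the build with "#if 0"): printing
 * the per-node table gathered by rt_par_getcpuinfo() above.  The
 * example_print_nodes() name is invented for this sketch, and it assumes
 * the same headers already included at the top of this file; the fields
 * printed are the nodeinfo members filled in above.
 */
#if 0
void example_print_nodes(rt_parhandle ph) {
  /* assumes the headers included at the top of parallel.c are visible */
  nodeinfo *nodes = NULL;
  int i;
  int numnodes = rt_par_getcpuinfo(ph, &nodes);

  if (rt_par_rank(ph) == 0) {
    for (i=0; i<numnodes; i++) {
      printf("node %2d: %s, %d CPUs, node speed %.1f\n",
             i, nodes[i].machname, nodes[i].numcpus, nodes[i].nodespeed);
    }
  }

  free(nodes);
}
#endif
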
/*
 * Communications implementation based on MPI persistent send/recv operations.
 *
 * MPI Request buffers are allocated, with one slot per scanline of the
 * image to be rendered.
 *
 * Persistent Send/Recv channels are initialized for the scanlines in
 * the image(s) to be rendered.
 *
 * For each frame, the persistent communications are used once.
 *
 * After all frames are rendered, the persistent channels are closed down
 * and the MPI Request buffers are freed.
 */

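/*
 * Illustrative usage sketch (kept out of the build with "#if 0"): the
 * order in which the persistent scanline entry points defined below are
 * meant to be used for a single frame.  The example_render_one_frame()
 * wrapper and the simplified per-row render loop are assumptions invented
 * for this sketch; it relies on the headers already included at the top
 * of this file.
 */
#if 0
void example_render_one_frame(rt_parhandle ph, scenedef *scene) {
  int row, totalrows;

  /* set up the persistent send/recv channels for this image */
  rt_parbuf pb = rt_par_init_scanlinereceives(ph, scene);

  /* node 0 posts all of its persistent receives up front */
  rt_par_start_scanlinereceives(ph, pb);

  totalrows = rt_par_sendrecvscanline_get_totalrows(ph, pb);
  for (row=0; row<totalrows; row++) {
    /* ... trace this node's next scanline into scene->img ... */
    rt_par_sendrecvscanline(ph, pb); /* workers start the matching send;  */
                                     /* node 0 polls its pending receives */
  }

  rt_par_waitscanlines(ph, pb);           /* block until every row has arrived */
  rt_par_delete_scanlinereceives(ph, pb); /* free the persistent requests */
}
#endif
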
void * rt_par_allocate_reqbuf(rt_parhandle voidhandle, int count) {
#ifdef MPI
  parhandle * ph = (parhandle *) voidhandle;
  if (ph->mpienabled) {
    pardata * p;
    p = malloc(sizeof(pardata));
    p->totalrows = 0;
    p->count = 0;
    p->curmsg = 0;
    p->haveinited = 0;
    p->havestarted = 0;
    p->requests = malloc(sizeof(MPI_Request)*count);
    p->statuses = malloc(sizeof(MPI_Status)*count);
    p->indices = malloc(sizeof(int)*count);
    return p;
  }
#endif
  return NULL;
}

void rt_par_free_reqbuf(rt_parhandle voidparhandle, rt_parbuf voidhandle) {
#ifdef MPI
  parhandle * ph = (parhandle *) voidparhandle;
  if (ph->mpienabled) {
    pardata *p = (pardata *) voidhandle;

    if (p == NULL)
      return; /* nothing to free */

    if (p->requests != NULL)
      free(p->requests);

    if (p->statuses != NULL)
      free(p->statuses);

    if (p->indices != NULL)
      free(p->indices);

    free(p);
  }
#endif
}

void * rt_par_init_scanlinereceives(rt_parhandle voidhandle, scenedef * scene) {
#ifdef MPI
  parhandle *ph = (parhandle *) voidhandle;
  if (ph->mpienabled) {
    int i, addr;
    pardata *p = (pardata *) rt_par_allocate_reqbuf(voidhandle, scene->vres);

    p->curmsg = 0;
    p->totalrows = 0;
    p->count = 0;
    p->haveinited = 1;

    if (scene->imgbufformat == RT_IMAGE_BUFFER_RGB24) {
      /* 24-bit RGB packed pixel format */
      unsigned char *imgbuf = (unsigned char *) scene->img;

      if (ph->commrank == 0) {
        for (i=0; i<scene->vres; i++) {
          if (i % ph->commsize != ph->commrank) {
            addr = i * scene->hres * 3;
            MPI_Recv_init(&imgbuf[addr], scene->hres * 3, MPI_BYTE,
                          i % ph->commsize, i+1, ph->comm, &p->requests[p->count]);
            p->count++;     /* count of received rows */
          } else {
            p->totalrows++; /* count of our own rows */
          }
        }
      } else {
        for (i=0; i<scene->vres; i++) {
          if (i % ph->commsize == ph->commrank) {
            addr = i * scene->hres * 3;
            MPI_Send_init(&imgbuf[addr], scene->hres * 3, MPI_BYTE,
                          0, i+1, ph->comm, &p->requests[p->count]);
            p->count++;     /* count of sent rows */
            p->totalrows++; /* count of sent rows */
          }
        }
      }
    } else if (scene->imgbufformat == RT_IMAGE_BUFFER_RGB96F) {
      /* 96-bit float RGB packed pixel format */
      float *imgbuf = (float *) scene->img;

      if (ph->commrank == 0) {
        for (i=0; i<scene->vres; i++) {
          if (i % ph->commsize != ph->commrank) {
            addr = i * scene->hres * 3;
            MPI_Recv_init(&imgbuf[addr], scene->hres * 3, MPI_FLOAT,
                          i % ph->commsize, i+1, ph->comm, &p->requests[p->count]);
            p->count++;     /* count of received rows */
          } else {
            p->totalrows++; /* count of our own rows */
          }
        }
      } else {
        for (i=0; i<scene->vres; i++) {
          if (i % ph->commsize == ph->commrank) {
            addr = i * scene->hres * 3;
            MPI_Send_init(&imgbuf[addr], scene->hres * 3, MPI_FLOAT,
                          0, i+1, ph->comm, &p->requests[p->count]);
            p->count++;     /* count of sent rows */
            p->totalrows++; /* count of sent rows */
          }
        }
      }
    }

    return p;
  }
#endif

  return NULL;
}


void rt_par_start_scanlinereceives(rt_parhandle voidparhandle, rt_parbuf voidhandle) {
#ifdef MPI
  parhandle *ph = (parhandle *) voidparhandle;
  if (ph->mpienabled) {
    pardata *p = (pardata *) voidhandle;

    p->havestarted = 1;
    if (ph->commrank == 0)
      MPI_Startall(p->count, p->requests);

    p->curmsg = 0;
  }
#endif
}

void rt_par_waitscanlines(rt_parhandle voidparhandle, rt_parbuf voidhandle) {
#ifdef MPI
  parhandle *ph = (parhandle *) voidparhandle;
  if (ph->mpienabled) {
    pardata *p = (pardata *) voidhandle;

    MPI_Waitall(p->count, p->requests, p->statuses);

    p->havestarted=0;
  }
#endif
}

void rt_par_delete_scanlinereceives(rt_parhandle voidparhandle, rt_parbuf voidhandle) {
#ifdef MPI
  parhandle *ph = (parhandle *) voidparhandle;
  if (ph->mpienabled) {
    int i;
    pardata *p = (pardata *) voidhandle;

    if (p == NULL)
      return; /* don't bomb if no valid handle */

    if (p->haveinited != 0 || p->havestarted != 0) {
      for (i=0; i<p->count; i++) {
        MPI_Request_free(&p->requests[i]);
      }
    }

    rt_par_free_reqbuf(voidparhandle, voidhandle);
  }
#endif
}

int rt_par_sendrecvscanline_get_totalrows(rt_parhandle voidparhandle, rt_parbuf voidhandle) {
#ifdef MPI
  parhandle *ph = (parhandle *) voidparhandle;
  if (ph->mpienabled) {
    pardata *p = (pardata *) voidhandle;
    return p->totalrows;
  }
#endif

  return 0;
}

void rt_par_sendrecvscanline(rt_parhandle voidparhandle, rt_parbuf voidhandle) {
#ifdef MPI
  parhandle *ph = (parhandle *) voidparhandle;
  if (ph->mpienabled) {
    pardata *p = (pardata *) voidhandle;

    if (ph->commrank == 0) {
#if MPI_TUNE == 0 || !defined(MPI_TUNE)
      /*
       * Default Technique
       */
      int outcount;
      int numtorecv = ph->commsize - 1; /* all nodes but node 0 */

      int numtotest = (numtorecv < (p->count - p->curmsg)) ?
                      numtorecv : (p->count - p->curmsg);

      if (numtotest < 1) {
        printf("Internal Tachyon MPI error, tried to recv zero/negative count!\n");
        return;
      }

      MPI_Testsome(numtotest, &p->requests[p->curmsg], &outcount,
                   &p->indices[p->curmsg], &p->statuses[p->curmsg]);
      p->curmsg += numtorecv;
#elif MPI_TUNE == 1
      /*
       * Technique number 1
       */
      int index, flag;
      MPI_Testany(p->count, p->requests, &index, &flag, p->statuses);
#elif MPI_TUNE == 2
      /*
       * Technique number 2
       */
      int flag;
      MPI_Testall(p->count, p->requests, &flag, p->statuses);
#elif MPI_TUNE == 3
      /*
       * Technique number 3
       */
      int i, index, flag;
      for (i=1; i<ph->commsize; i++)
        MPI_Testany(p->count, p->requests, &index, &flag, p->statuses);
#endif
    } else {
      if (p->curmsg >= p->count) {
        printf("Internal Tachyon MPI error, tried to send oob count!\n");
        return;
      }
      MPI_Start(&p->requests[p->curmsg]);
      p->curmsg++;
    }
  }
#endif
}
628 
void rt_par_free_reqbuf(rt_parhandle voidparhandle, rt_parbuf voidhandle)
Definition: parallel.c:414
int worldrank
Definition: parallel.c:65
int rt_par_finish(rt_parhandle voidhandle)
Definition: parallel.c:276
void rt_par_barrier_sync(rt_parhandle voidhandle)
Definition: parallel.c:319
static void rt_par_comm_default(parhandle *ph)
Definition: parallel.c:74
int commrank
Definition: parallel.c:69
int rt_par_set_mpi_comm(rt_parhandle voidhandle, void *mpicomm)
Definition: parallel.c:179
void rt_par_start_scanlinereceives(rt_parhandle voidparhandle, rt_parbuf voidhandle)
Definition: parallel.c:510
int owns_comm
Definition: parallel.c:59
rt_parhandle rt_par_init_nompi(void)
Definition: parallel.c:107
int rt_par_set_mpi_comm_split(rt_parhandle voidhandle, void *mpicomm, int color, int key)
Definition: parallel.c:219
void rt_par_delete_scanlinereceives(rt_parhandle voidparhandle, rt_parbuf voidhandle)
Definition: parallel.c:538
rt_parhandle rt_par_init_mpi_comm(void *mpicomm)
Definition: parallel.c:132
int mpi_client
Definition: parallel.c:58
void rt_par_waitscanlines(rt_parhandle voidparhandle, rt_parbuf voidhandle)
Definition: parallel.c:525
Tachyon cross-platform thread creation and management, atomic operations, and CPU feature query APIs...
int rt_par_rank(rt_parhandle voidhandle)
Definition: parallel.c:309
int rt_par_set_mpi_comm_world_split(rt_parhandle voidhandle, int color, int key)
Definition: parallel.c:247
int rt_par_getcpuinfo(rt_parhandle voidhandle, nodeinfo **nodes)
Definition: parallel.c:328
int rt_par_set_mpi_comm_world_split_all(rt_parhandle voidhandle)
Definition: parallel.c:261
int callsize
Definition: parallel.c:68
int rt_par_sendrecvscanline_get_totalrows(rt_parhandle voidparhandle, rt_parbuf voidhandle)
Definition: parallel.c:559
int rt_thread_numprocessors(void)
number of processors available, subject to user override
Definition: threads.c:202
int commsize
Definition: parallel.c:70
rt_parhandle rt_par_init_mpi_comm_split(void *mpicomm, int color, int key)
Definition: parallel.c:160
Tachyon cross-platform timers, special math function wrappers, and RNGs.
int rt_cpu_capability_flags(rt_cpu_caps_t *cpucaps)
CPU optional instruction set capability flags.
Definition: threads.c:281
int worldsize
Definition: parallel.c:66
int mpienabled
Definition: parallel.c:57
int rt_par_size(rt_parhandle voidhandle)
Definition: parallel.c:314
void rt_par_sendrecvscanline(rt_parhandle voidparhandle, rt_parbuf voidhandle)
Definition: parallel.c:571
void * rt_par_init_scanlinereceives(rt_parhandle voidhandle, scenedef *scene)
Definition: parallel.c:436
rt_parhandle rt_par_init_mpi_comm_world(void)
Definition: parallel.c:151
rt_parhandle rt_par_init(int *argc, char ***argv)
Definition: parallel.c:115
void * rt_par_allocate_reqbuf(rt_parhandle voidhandle, int count)
Definition: parallel.c:394
int callrank
Definition: parallel.c:67
Tachyon public API function prototypes and declarations used to drive the ray tracing engine...
int rt_par_set_mpi_comm_world(rt_parhandle voidhandle)
Definition: parallel.c:206