Tachyon (current)  Current Main Branch
trace.c
Go to the documentation of this file.
1 /*
2  * trace.c - This file contains the functions for firing primary rays
3  * and handling subsequent calculations
4  *
5  * (C) Copyright 1994-2022 John E. Stone
6  * SPDX-License-Identifier: BSD-3-Clause
7  *
8  * $Id: trace.c,v 1.145 2022/03/14 03:50:38 johns Exp $
9  *
10  */
11 
12 #include <stdio.h>
13 #include <stdlib.h>
14 #include <string.h>
15 #include <math.h>
16 
17 #define TACHYON_INTERNAL 1
18 #include "tachyon.h"
19 #include "macros.h"
20 #include "vector.h"
21 #include "shade.h"
22 #include "camera.h"
23 #include "util.h"
24 #include "threads.h"
25 #include "parallel.h"
26 #include "intersect.h"
27 #include "ui.h"
28 #include "trace.h"
29 #if defined(_OPENMP)
30 #include <omp.h>
31 #endif
32 
33 color trace(ray * primary) {
34  if (primary->depth > 0) {
35  intersect_objects(primary);
36  return primary->scene->shader(primary);
37  }
38 
39  /* if the ray is truncated, return the background texture as its color */
40  return primary->scene->bgtexfunc(primary);
41 }
42 
43 
44 #if defined(MPI)
/*
 * node_row_sendrecv() - exchange one completed scanline of pixel data
 * between MPI nodes while rendering is still in progress.
 *
 * my_tid:   worker thread index within this node (only thread 0 may
 *           make MPI calls)
 * t:        per-thread parameter block (barriers, atomic row counters)
 * scene:    scene handle (node count, MPI parallel handle/buffer)
 * sentrows: in/out count of rows this node has already sent; advanced
 *           here as rows are transmitted
 * y:        1-based index of the row just finished by this thread
 *
 * Returns 0 always.
 */
int node_row_sendrecv(int my_tid, thr_parms * t, scenedef *scene,
                      int *sentrows, int y) {
  /* If running with MPI and we have multiple nodes, we must exchange */
  /* pixel data for each row of the output image as we run.           */
  if (scene->nodes > 1) {

#if defined(THR)
    /* When mixing threads+MPI, we have to ensure that all threads in  */
    /* a given node have completed a row of pixels before we try to    */
    /* send it, which requires a barrier synchronization.              */

#if defined(USEATOMICBARRIERS)
    /*
     * Use fast atomic integer ops for per-row MPI sendrecv barriers
     */
    int rowidx = y - 1;   /* convert 1-based row y to 0-based barrier index */
    int rowbarcnt;
    int rowsdone=-1;      /* -1 flags "not yet fetched from the atomic ctr" */

    /* count this thread's arrival at the per-row barrier */
    rowbarcnt = rt_atomic_int_add_and_fetch(&t->rowbars[rowidx], 1);

    /* if we were the last thread to read the barrier, we increment the */
    /* rowsdone counter and continue on...                              */
    if (rowbarcnt == t->nthr) {
/* printf("node[%d] thr[%d] rowidx: %d\n", scene->mynode, my_tid, rowidx); */
      rowsdone = rt_atomic_int_add_and_fetch(t->rowsdone, 1);

      /* clear the row barrier so it is ready to be used again... */
      rt_atomic_int_set(&t->rowbars[rowidx], 0);
    }

    /* Since only thread 0 can make MPI calls, it checks how many rows */
    /* are done and sends any completed rows that weren't already sent */
    if (my_tid == 0) {
      int row;

      /* if we've already got rowsdone from a previous fetch-and-add, */
      /* we use it, otherwise we have to actually query it...         */
      if (rowsdone < 0)
        rowsdone = rt_atomic_int_get(t->rowsdone);

      /* send any rows that are completed but not already sent */
      for (row=(*sentrows); row<rowsdone; row++) {
/* printf("node[%d] sending row: %d (sentrows %d)\n", scene->mynode, row, *sentrows); */
        /* only thread 0 can use MPI */
        rt_par_sendrecvscanline(scene->parhnd, scene->parbuf);
/* printf("node[%d] row: %d sent!\n", scene->mynode, row); */
      }
      *sentrows = row;  /* record how far transmission has progressed */
    }
#else
    /*
     * Use the threadpool barriers to synchronize all worker threads
     * prior to invoking the MPI sendrecv operations.  This kind of
     * barrier is very costly for real-time renderings, so it has been
     * replaced by faster atomic counters.
     */
    rt_thread_barrier(t->runbar, 1);

    /* after all worker threads have completed the row, we can send it */
    if (my_tid == 0) {
      rt_par_sendrecvscanline(scene->parhnd, scene->parbuf); /* only thread 0 can use MPI */
    }
#endif
#else
    /* For OpenMP, we must also check that we are thread ID 0 */
    if (my_tid == 0) {
      rt_par_sendrecvscanline(scene->parhnd, scene->parbuf); /* only thread 0 can use MPI */
    }
#endif

    /* Since all rows are stored in different memory locations   */
    /* there's no need to protect against race conditions between */
    /* thread 0 MPI calls and ongoing work by peer threads running */
    /* farther ahead on subsequent rows.                           */
  }

  return 0;
}
124 
125 
/*
 * node_finish_row_sendrecvs() - drain all remaining MPI row transfers
 * at the end of a rendering pass.  When thread 0 of a node finishes its
 * own pixels early, completed-but-unsent rows may remain; this routine
 * ensures they are all transmitted before returning.
 *
 * my_tid:   worker thread index within this node
 * t:        per-thread parameter block (barrier, atomic rowsdone counter)
 * scene:    scene handle (node count, MPI parallel handle/buffer)
 * sentrows: in/out count of rows already sent; advanced to rowsdone
 *
 * Returns 0 always.
 */
int node_finish_row_sendrecvs(int my_tid, thr_parms * t, scenedef *scene, int *sentrows) {

  if (scene->nodes > 1) {
#if defined(THR)
    /* When mixing threads+MPI, we have to ensure that in the case that  */
    /* thread 0 of node 0 finishes early, we force it to finish handling */
    /* all outstanding row transfers before it returns.                  */
#if defined(USEATOMICBARRIERS)
#if 1
    /* XXX this barrier is very costly for real-time renderings, so it */
    /* is a candidate for replacement by a busy-wait..                 */
    rt_thread_barrier(t->runbar, 1);
#else
    /* disabled alternative: busy-wait for all peer threads to complete */
    if (my_tid == 0) {
      int rowsdone, totalrows;
      totalrows = rt_par_sendrecvscanline_get_totalrows(scene->parhnd, scene->parbuf);

/* printf("node[%d]: spinning waiting for totalrows: %d\n", scene->mynode, totalrows); */

      /* spin on the 'rowsdone' integer atomic counter */
      while ((rowsdone = rt_atomic_int_get(t->rowsdone)) < totalrows) {
/* printf("node[%d]: spinning waiting, rowsdone: %d totalrows: %d\n", scene->mynode, rowsdone, totalrows); */
      }
    }
#endif

    /* Since only thread 0 can make MPI calls, it checks how many rows */
    /* are done and sends any completed rows that weren't already sent */
    if (my_tid == 0) {
      int row;
      int rowsdone = rt_atomic_int_get(t->rowsdone);
/* printf("node[%d] finish sendrecvs, rowsdone: %d sentrows: %d\n", scene->mynode, rowsdone, *sentrows); */
      /* send any rows that are completed but not already sent */
      for (row=(*sentrows); row<rowsdone; row++) {
/* printf("node[%d] sending row: %d (finishing)\n", scene->mynode, row); */
        /* only thread 0 can use MPI */
        rt_par_sendrecvscanline(scene->parhnd, scene->parbuf);
/* printf("node[%d] row: %d sent! (finishing)\n", scene->mynode, row); */
      }
      *sentrows = row;
    }
#else
    /* nothing to do for the old variant of the code since it kept all */
    /* worker threads in lockstep...                                   */
#endif
#else
    /* nothing to do for OpenMP or other scenarios */
#endif
  }

  return 0;
}
179 #endif /* MPI */
180 
181 
182 void convert_rgb96f_rgb24u(color col, unsigned char *img) {
183  int R = (int) (col.r * 255.0f); /* quantize float to integer */
184  int G = (int) (col.g * 255.0f); /* quantize float to integer */
185  int B = (int) (col.b * 255.0f); /* quantize float to integer */
186 
187  if (R > 255) R = 255; /* clamp pixel value to range 0-255 */
188  if (R < 0) R = 0;
189  img[0] = (byte) R; /* Store final pixel to the image buffer */
190 
191  if (G > 255) G = 255; /* clamp pixel value to range 0-255 */
192  if (G < 0) G = 0;
193  img[1] = (byte) G; /* Store final pixel to the image buffer */
194 
195  if (B > 255) B = 255; /* clamp pixel value to range 0-255 */
196  if (B < 0) B = 0;
197  img[2] = (byte) B; /* Store final pixel to the image buffer */
198 }
199 
200 
/*
 * thread_trace() - per-worker-thread rendering entry point.  Each thread
 * (POSIX thread, or member of the OpenMP team spawned here) renders its
 * share of the image, firing one primary ray per pixel via the camera's
 * cam_ray() function and writing results into the shared image buffer in
 * either RGB24 (unsigned char) or RGB96F (float) format.  Work is divided
 * either by interleaved static rows (startx/stopx/xinc, starty/stopy/yinc)
 * or, for THR builds without MPI, by a dynamic tile scheduler driven by an
 * atomic pixel counter.  With MPI, completed rows are exchanged between
 * nodes as rendering proceeds.
 *
 * t: per-thread parameter block (tile bounds, scene handle, barriers,
 *    mailbox, serial number).  Returns NULL.
 */
void * thread_trace(thr_parms * t) {
#if defined(_OPENMP)
#pragma omp parallel default( none ) firstprivate(t)
{
#endif
  unsigned long * local_mbox = NULL;  /* per-thread grid acceleration mailbox */
  scenedef * scene;
  color col;                          /* color result of the current pixel */
  ray primary;                        /* primary ray reused for all pixels */
  int x, y, do_ui, hskip;
  int startx, stopx, xinc, starty, stopy, yinc, hsize, vres;
  rng_frand_handle cachefrng; /* Hold cached FP RNG state */
#if defined(RT_ACCUMULATE_ON)
  float accum_norm = 1.0f;    /* accumulation buffer normalization factor */
#endif

#if defined(MPI)
  int sentrows = 0; /* no rows sent yet */
#endif

#if defined(_OPENMP)
  int my_tid = omp_get_thread_num(); /* get OpenMP thread ID */
  unsigned long my_serialno = 1; /* XXX should restore previous serialno */
#else
  int my_tid = t->tid;
  unsigned long my_serialno = t->serialno;
#endif

  /*
   * Copy all of the frequently used parameters into local variables.
   * This seems to improve performance, especially on NUMA systems.
   */
  startx = t->startx;
  stopx = t->stopx;
  xinc = t->xinc;

  starty = t->starty;
  stopy = t->stopy;
  yinc = t->yinc;

  scene = t->scene;
  hsize = scene->hres*3;   /* row stride in color components (3 per pixel) */
  vres = scene->vres;
  hskip = xinc * 3;        /* per-pixel address stride within a row        */
  do_ui = (scene->mynode == 0 && my_tid == 0); /* only node 0 / thread 0
                                                  drives the progress UI  */

#if !defined(DISABLEMBOX)
  /* allocate mailbox array per thread... */
#if defined(_OPENMP)
  local_mbox = (unsigned long *)calloc(sizeof(unsigned long)*scene->objgroup.numobjects, 1);
#else
  /* reuse a previously-allocated mailbox when the thread parms carry one */
  if (t->local_mbox == NULL)
    local_mbox = (unsigned long *)calloc(sizeof(unsigned long)*scene->objgroup.numobjects, 1);
  else
    local_mbox = t->local_mbox;
#endif
#else
  local_mbox = NULL; /* mailboxes are disabled */
#endif

#if defined(RT_ACCUMULATE_ON)
  /* calculate accumulation buffer normalization factor */
  if (scene->accum_count > 0)
    accum_norm = 1.0f / scene->accum_count;
#endif

  /*
   * When compiled on platforms with a 64-bit long, ray serial numbers won't
   * wraparound in _anyone's_ lifetime, so there's no need to even check....
   * On lesser-bit platforms, we're not quite so lucky, so we have to check.
   * We use a sizeof() check so that we can eliminate the LP64 macro tests
   * and eventually simplify the Makefiles.
   */
  if (sizeof(unsigned long) < 8) {
    /*
     * If we are getting close to integer wraparound on the
     * ray serial numbers, we need to re-clear the mailbox
     * array(s).  Each thread maintains its own serial numbers
     * so only those threads that are getting hit hard will
     * need to re-clear their mailbox arrays.  In all likelihood,
     * the threads will tend to hit their counter limits at about
     * the same time though.
     */
    if (local_mbox != NULL) {
      /* reset counters if serial exceeds 1/8th largest possible ulong */
      if (my_serialno > (((unsigned long) 1) << ((sizeof(unsigned long) * 8) - 3))) {
        memset(local_mbox, 0, sizeof(unsigned long)*scene->objgroup.numobjects);
        my_serialno = 1;
      }
    }
  }

  /* setup the thread-specific properties of the primary ray(s) */
  {
    /* seed the RNG from thread ID and node ID so every thread gets an */
    /* independent random stream                                       */
    unsigned int rngseed = tea4(my_tid, scene->mynode);
#if defined(RT_ACCUMULATE_ON)
    /* fold the accumulation pass count into the seed so each pass */
    /* samples differently                                         */
    rngseed = tea4(scene->accum_count, rngseed);
    camray_init(scene, &primary, my_serialno, local_mbox, rngseed,
                tea4(scene->accum_count, scene->accum_count));
#else
    // unsigned int rngseed = rng_seed_from_tid_nodeid(my_tid, scene->mynode);
    camray_init(scene, &primary, my_serialno, local_mbox, rngseed, rngseed);
#endif
  }

  /* copy the RNG state to cause increased coherence among  */
  /* AO sample rays, significantly reducing granulation     */
  cachefrng = primary.frng;

  /*
   * Render the image in either RGB24 or RGB96F format
   */
  if (scene->imgbufformat == RT_IMAGE_BUFFER_RGB24) {
    /* 24-bit unsigned char RGB, RT_IMAGE_BUFFER_RGB24 */
    unsigned char *img = (unsigned char *) scene->img;

#if defined(THR) && !defined(MPI)
    /* implement dynamic pixel scheduler */
    if (t->sched_dynamic) {
      int pixel;
      int end = scene->hres * scene->vres - 1; /* last valid 1-D pixel index */
      int tilesz = 32;                         /* pixels claimed per atomic op */

      /* atomically claim tiles of 'tilesz' consecutive pixels until the */
      /* shared counter passes the end of the image                      */
      while ((pixel = rt_atomic_int_fetch_and_add(t->pixelsched, tilesz)) <= end) {
        int tpxl;
        int mystart=pixel;
        int myend=pixel+tilesz-1;
        if (myend > end)
          myend = end;   /* clip the final tile to the image bounds */
        for (tpxl=mystart; tpxl<=myend; tpxl++) {
          /* decompose the 1-D pixel index into 0-based x/y coordinates */
          int y = tpxl / scene->hres;
          int x = tpxl - (y*scene->hres);
          unsigned int idx = y*scene->hres + x; /* 1-D pixel index */
#if defined(RT_ACCUMULATE_ON)
          if (scene->accum_count) {
            primary.randval = tea4(idx, scene->accum_count);
          }
#endif

          int addr = hsize * y + (3 * (startx - 1 + x)); /* row address */

          primary.idx = idx;        /* used for idx-based RNG seeding          */
          primary.frng = cachefrng; /* each pixel uses the same AO RNG seed    */
          col=scene->camera.cam_ray(&primary, x, y);  /* generate ray */

#if defined(RT_ACCUMULATE_ON)
          /* accumulate and normalize if enabled */
          if (scene->accum_buf != NULL) {
            scene->accum_buf[addr    ] += col.r;
            col.r = scene->accum_buf[addr    ] * accum_norm;
            scene->accum_buf[addr + 1] += col.g;
            col.g = scene->accum_buf[addr + 1] * accum_norm;
            scene->accum_buf[addr + 2] += col.b;
            col.b = scene->accum_buf[addr + 2] * accum_norm;
          }
#endif

          convert_rgb96f_rgb24u(col, &img[addr]);
        }

        /* update the progress meter roughly every 16 rows worth of pixels */
        if (do_ui && !(mystart % (16*scene->hres))) {
          rt_ui_progress((100L * mystart) / end);   /* call progress meter callback */
        }
      }
    } else {
#endif

#if defined(_OPENMP)
#pragma omp for schedule(runtime)
#endif
      /* static interleaved-row scheduler; x/y are 1-based here */
      for (y=starty; y<=stopy; y+=yinc) {
        int addr = hsize * (y - 1) + (3 * (startx - 1));   /* row address */
        for (x=startx; x<=stopx; x+=xinc,addr+=hskip) {
          unsigned int idx = y*scene->hres + x; /* 1-D pixel index */
#if defined(RT_ACCUMULATE_ON)
          if (scene->accum_count) {
            primary.randval = tea4(idx, scene->accum_count);
          }
#endif

          primary.idx = idx;        /* used for idx-based RNG seeding          */
          primary.frng = cachefrng; /* each pixel uses the same AO RNG seed    */
          col=scene->camera.cam_ray(&primary, x, y);  /* generate ray */

#if defined(RT_ACCUMULATE_ON)
          /* accumulate and normalize if enabled */
          if (scene->accum_buf != NULL) {
            scene->accum_buf[addr    ] += col.r;
            col.r = scene->accum_buf[addr    ] * accum_norm;
            scene->accum_buf[addr + 1] += col.g;
            col.g = scene->accum_buf[addr + 1] * accum_norm;
            scene->accum_buf[addr + 2] += col.b;
            col.b = scene->accum_buf[addr + 2] * accum_norm;
          }
#endif

          convert_rgb96f_rgb24u(col, &img[addr]);
        } /* end of x-loop */

        /* update the progress meter every 16 rows */
        if (do_ui && !((y-1) % 16)) {
          rt_ui_progress((100L * y) / vres);   /* call progress meter callback */
        }

#if defined(MPI)
        /* Ensure all threads have completed this row, then send it */
        node_row_sendrecv(my_tid, t, scene, &sentrows, y);
#endif
      } /* end y-loop */

#if defined(THR) && !defined(MPI)
    } /* end of dynamic scheduler test */
#endif

  } else {  /* end of RGB24 loop */
    /* 96-bit float RGB, RT_IMAGE_BUFFER_RGB96F */
    int addr;
    float *img = (float *) scene->img;

#if defined(THR) && !defined(MPI)
    /* implement dynamic pixel scheduler */
    if (t->sched_dynamic) {
      int pixel;
      int end = scene->hres * scene->vres - 1; /* last valid 1-D pixel index */
      int tilesz = 32;                         /* pixels claimed per atomic op */

      /* atomically claim tiles of 'tilesz' consecutive pixels until the */
      /* shared counter passes the end of the image                      */
      while ((pixel = rt_atomic_int_fetch_and_add(t->pixelsched, tilesz)) <= end) {
        int tpxl;
        int mystart=pixel;
        int myend=pixel+tilesz-1;
        if (myend > end)
          myend = end;   /* clip the final tile to the image bounds */
        for (tpxl=mystart; tpxl<=myend; tpxl++) {
          /* decompose the 1-D pixel index into 0-based x/y coordinates */
          int y = tpxl / scene->hres;
          int x = tpxl - (y*scene->hres);
          unsigned int idx = y*scene->hres + x; /* 1-D pixel index */

          addr = hsize * y + (3 * (startx - 1 + x));   /* row address */

#if defined(RT_ACCUMULATE_ON)
          if (scene->accum_count) {
            primary.randval = tea4(idx, scene->accum_count);
          }
#endif

          primary.idx = idx;        /* used for idx-based RNG seeding          */
          primary.frng = cachefrng; /* each pixel uses the same AO RNG seed    */
          col=scene->camera.cam_ray(&primary, x, y);  /* generate ray */

#if defined(RT_ACCUMULATE_ON)
          /* accumulate and normalize if enabled */
          if (scene->accum_buf != NULL) {
            scene->accum_buf[addr    ] += col.r;
            col.r = scene->accum_buf[addr    ] * accum_norm;
            scene->accum_buf[addr + 1] += col.g;
            col.g = scene->accum_buf[addr + 1] * accum_norm;
            scene->accum_buf[addr + 2] += col.b;
            col.b = scene->accum_buf[addr + 2] * accum_norm;
          }
#endif

          img[addr    ] = col.r;   /* Store final pixel to the image buffer */
          img[addr + 1] = col.g;   /* Store final pixel to the image buffer */
          img[addr + 2] = col.b;   /* Store final pixel to the image buffer */
        }

        /* update the progress meter roughly every 16 rows worth of pixels */
        if (do_ui && !(mystart % (16*scene->hres))) {
          rt_ui_progress((100L * mystart) / end);   /* call progress meter callback */
        }
      }
    } else {
#endif

#if defined(_OPENMP)
#pragma omp for schedule(runtime)
#endif
      /* static interleaved-row scheduler; x/y are 1-based here */
      for (y=starty; y<=stopy; y+=yinc) {
        addr = hsize * (y - 1) + (3 * (startx - 1));   /* row address */
        for (x=startx; x<=stopx; x+=xinc,addr+=hskip) {
          unsigned int idx = y*scene->hres + x; /* 1-D pixel index */
#if defined(RT_ACCUMULATE_ON)
          if (scene->accum_count) {
            primary.randval = tea4(idx, scene->accum_count);
          }
#endif

          primary.idx = idx;        /* used for idx-based RNG seeding          */
          primary.frng = cachefrng; /* each pixel uses the same AO RNG seed    */
          col=scene->camera.cam_ray(&primary, x, y);  /* generate ray */

#if defined(RT_ACCUMULATE_ON)
          /* accumulate and normalize if enabled */
          if (scene->accum_buf != NULL) {
            scene->accum_buf[addr    ] += col.r;
            col.r = scene->accum_buf[addr    ] * accum_norm;
            scene->accum_buf[addr + 1] += col.g;
            col.g = scene->accum_buf[addr + 1] * accum_norm;
            scene->accum_buf[addr + 2] += col.b;
            col.b = scene->accum_buf[addr + 2] * accum_norm;
          }
#endif

          img[addr    ] = col.r;   /* Store final pixel to the image buffer */
          img[addr + 1] = col.g;   /* Store final pixel to the image buffer */
          img[addr + 2] = col.b;   /* Store final pixel to the image buffer */
        } /* end of x-loop */

        /* update the progress meter every 16 rows */
        if (do_ui && !((y-1) % 16)) {
          rt_ui_progress((100L * y) / vres);   /* call progress meter callback */
        }

#if defined(MPI)
        /* Ensure all threads have completed this row, then send it */
        node_row_sendrecv(my_tid, t, scene, &sentrows, y);
#endif
      } /* end y-loop */

#if defined(THR) && !defined(MPI)
    } /* end of dynamic scheduler test */
#endif

  } /* end of RGB96F loop */


  /*
   * Image has been rendered into the buffer in the appropriate pixel format
   */
  my_serialno = primary.serial + 1;

#if defined(_OPENMP)
  /* XXX The OpenMP code needs to find a way to save serialno for next */
  /* rendering pass, otherwise we need to force-clear the mailbox      */
  /* t->serialno = my_serialno; */ /* save our serialno for next launch */

  /* XXX until we save/restore serial numbers, we have to clear the */
  /* mailbox before the next rendering pass                         */
  /* NOTE(review): if DISABLEMBOX is defined, local_mbox is NULL here and */
  /* this memset would be undefined behavior on sub-64-bit platforms --   */
  /* confirm whether that build combination is ever used                  */
  if (sizeof(unsigned long) < 8) {
    memset(local_mbox, 0, sizeof(unsigned long)*scene->objgroup.numobjects);
  }

  if (local_mbox != NULL)
    free(local_mbox);
#else
  t->serialno = my_serialno;   /* save our serialno for next launch */

  /* only free the mailbox if we allocated it ourselves rather than */
  /* reusing the one carried in the thread parms                    */
  if (t->local_mbox == NULL) {
    if (local_mbox != NULL)
      free(local_mbox);
  }
#endif

  /* ensure all threads have completed their pixels before return */
  if (scene->nodes == 1)
    rt_thread_barrier(t->runbar, 1);
#if defined(MPI)
  else
    node_finish_row_sendrecvs(my_tid, t, scene, &sentrows);
#endif

/* printf("node[%d] thr[%d] done! *****************************\n", scene->mynode, my_tid); */

#if defined(_OPENMP)
}
#endif

  return(NULL);
}
567 
int stopx
ending X pixel index
Definition: trace.h:18
void convert_rgb96f_rgb24u(color col, unsigned char *img)
Definition: trace.c:182
int rt_atomic_int_get(rt_atomic_int_t *atomp)
get an atomic int variable
Definition: threads.c:1123
int rt_thread_barrier(rt_barrier_t *barrier, int increment)
synchronize on counting barrier primitive
Definition: threads.c:1425
int nthr
total number of worker threads
Definition: trace.h:13
scenedef * scene
scene handle
Definition: trace.h:14
rt_barrier_t * runbar
sleeping thread pool barrier
Definition: trace.h:23
int starty
starting Y pixel index
Definition: trace.h:20
Tachyon cross-platform thread creation and management, atomic operations, and CPU feature query APIs...
int startx
starting X pixel index
Definition: trace.h:17
int rt_atomic_int_fetch_and_add(rt_atomic_int_t *atomp, int inc)
fetch an atomic int and add inc to it, returning original value
Definition: threads.c:1152
int rt_par_sendrecvscanline_get_totalrows(rt_parhandle voidparhandle, rt_parbuf voidhandle)
Definition: parallel.c:559
void * thread_trace(thr_parms *t)
Definition: trace.c:201
unsigned long * local_mbox
grid acceleration mailbox structure
Definition: trace.h:15
color trace(ray *primary)
Definition: trace.c:33
int yinc
Y pixel stride.
Definition: trace.h:22
Tachyon cross-platform timers, special math function wrappers, and RNGs.
int xinc
X pixel stride.
Definition: trace.h:19
unsigned long serialno
ray mailbox test serial number
Definition: trace.h:16
void rt_par_sendrecvscanline(rt_parhandle voidparhandle, rt_parbuf voidhandle)
Definition: parallel.c:571
int rt_atomic_int_set(rt_atomic_int_t *atomp, int val)
set an atomic int variable
Definition: threads.c:1087
unsigned int tea4(unsigned int v0, unsigned int v1)
Definition: util.c:626
void camray_init(scenedef *scene, ray *primary, unsigned long serial, unsigned long *mbox, unsigned int aarandval, unsigned int aorandval)
Definition: camera.c:186
void rt_ui_progress(int percent)
Definition: ui.c:36
Tachyon public API function prototypes and declarations used to drive the ray tracing engine...
int rt_atomic_int_add_and_fetch(rt_atomic_int_t *atomp, int inc)
fetch an atomic int and add inc to it, returning new value
Definition: threads.c:1180
int tid
worker thread index
Definition: trace.h:12
int stopy
ending Y pixel index
Definition: trace.h:21
void intersect_objects(ray *ry)
Definition: intersect.c:47