w32tex
About: TeX Live provides a comprehensive TeX system, including all the major TeX-related programs, macro packages, and fonts that are free software. This package contains the Windows (w32tex) sources.
  Fossies Dox: w32tex-src.tar.xz  (unofficial and experimental doxygen-generated source code documentation)

cairo-clip-tor-scan-converter.c
1 /* -*- Mode: c; tab-width: 8; c-basic-offset: 4; indent-tabs-mode: t; -*- */
2 /* glitter-paths - polygon scan converter
3  *
4  * Copyright (c) 2008 M Joonas Pihlaja
5  * Copyright (c) 2007 David Turner
6  *
7  * Permission is hereby granted, free of charge, to any person
8  * obtaining a copy of this software and associated documentation
9  * files (the "Software"), to deal in the Software without
10  * restriction, including without limitation the rights to use,
11  * copy, modify, merge, publish, distribute, sublicense, and/or sell
12  * copies of the Software, and to permit persons to whom the
13  * Software is furnished to do so, subject to the following
14  * conditions:
15  *
16  * The above copyright notice and this permission notice shall be
17  * included in all copies or substantial portions of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
21  * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
22  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
23  * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
24  * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
25  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26  * OTHER DEALINGS IN THE SOFTWARE.
27  */
28 /* This is the Glitter paths scan converter incorporated into cairo.
29  * The source is from commit 734c53237a867a773640bd5b64816249fa1730f8
30  * of
31  *
32  * https://gitweb.freedesktop.org/?p=users/joonas/glitter-paths
33  */
34 /* Glitter-paths is a stand alone polygon rasteriser derived from
35  * David Turner's reimplementation of Tor Andersson's 15x17
36  * supersampling rasteriser from the Apparition graphics library. The
37  * main new feature here is cheaply choosing per-scan line between
38  * doing fully analytical coverage computation for an entire row at a
39  * time vs. using a supersampling approach.
40  *
41  * David Turner's code can be found at
42  *
43  * http://david.freetype.org/rasterizer-shootout/raster-comparison-20070813.tar.bz2
44  *
45  * In particular this file incorporates large parts of ftgrays_tor10.h
46  * from raster-comparison-20070813.tar.bz2
47  */
48 /* Overview
49  *
50  * A scan converter's basic purpose is to take polygon edges and convert
51  * them into an RLE compressed A8 mask. This one works in two phases:
52  * gathering edges and generating spans.
53  *
54  * 1) As the user feeds the scan converter edges they are vertically
55  * clipped and bucketted into a _polygon_ data structure. The edges
56  * are also snapped from the user's coordinates to the subpixel grid
57  * coordinates used during scan conversion.
58  *
59  * user
60  * |
61  * | edges
62  * V
63  * polygon buckets
64  *
65  * 2) Generating spans works by performing a vertical sweep of pixel
66  * rows from top to bottom and maintaining an _active_list_ of edges
67  * that intersect the row. From the active list the fill rule
68  * determines which edges are the left and right edges of the start of
69  * each span, and their contribution is then accumulated into a pixel
70  * coverage list (_cell_list_) as coverage deltas. Once the coverage
71  * deltas of all edges are known we can form spans of constant pixel
72  * coverage by summing the deltas during a traversal of the cell list.
73  * At the end of a pixel row the cell list is sent to a coverage
74  * blitter for rendering to some target surface.
75  *
76  * The pixel coverages are computed by either supersampling the row
77  * and box filtering a mono rasterisation, or by computing the exact
78  * coverages of edges in the active list. The supersampling method is
79  * used whenever some edge starts or stops within the row or there are
80  * edge intersections in the row.
81  *
82  * polygon bucket for \
83  * current pixel row |
84  * | |
85  * | activate new edges | Repeat GRID_Y times if we
86  * V \ are supersampling this row,
87  * active list / or just once if we're computing
88  * | | analytical coverage.
89  * | coverage deltas |
90  * V |
91  * pixel coverage list /
92  * |
93  * V
94  * coverage blitter
95  */
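/* A minimal sketch of the intended call sequence, mirroring what
 * _cairo_clip_tor_scan_converter_create() and _generate() at the end of
 * this file do (the edge array, the renderer and the span pool belong to
 * the hypothetical caller):
 *
 *     _glitter_scan_converter_init (converter, &jmp);
 *     status = glitter_scan_converter_reset (converter, ymin, ymax);
 *     for (i = 0; i < num_edges; i++)
 *         glitter_scan_converter_add_edge (converter, &edges[i], FALSE);
 *     glitter_scan_converter_render (converter, nonzero_fill,
 *                                    renderer, span_pool);
 *     _glitter_scan_converter_fini (converter);
 *
 * Out-of-memory failures longjmp() back to the setjmp() the caller is
 * expected to have armed on the same jmp_buf. */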
96 #include "cairoint.h"
97 #include "cairo-spans-private.h"
98 #include "cairo-error-private.h"
99 
100 #include <assert.h>
101 #include <stdlib.h>
102 #include <string.h>
103 #include <limits.h>
104 #include <setjmp.h>
105 
106 /* The input coordinate scale and the rasterisation grid scales. */
107 #define GLITTER_INPUT_BITS CAIRO_FIXED_FRAC_BITS
108 #define GRID_X_BITS CAIRO_FIXED_FRAC_BITS
109 #define GRID_Y 15
110 
111 /* Set glitter up to use a cairo span renderer to do the coverage
112  * blitting. */
113 struct pool;
114 struct cell_list;
115 
116 /*-------------------------------------------------------------------------
117  * glitter-paths.h
118  */
119 
120 /* "Input scaled" numbers are fixed precision reals with multiplier
121  * 2**GLITTER_INPUT_BITS. Input coordinates are given to glitter as
122  * pixel scaled numbers. These get converted to the internal grid
123  * scaled numbers as soon as possible. Internal overflow is possible
124  * if GRID_X/Y inside glitter-paths.c is larger than
125  * 1<<GLITTER_INPUT_BITS. */
126 #ifndef GLITTER_INPUT_BITS
127 # define GLITTER_INPUT_BITS 8
128 #endif
129 #define GLITTER_INPUT_SCALE (1<<GLITTER_INPUT_BITS)
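/* For example, with GLITTER_INPUT_BITS == 8 (cairo's 24.8 fixed point) the
 * pixel coordinate 2.5 arrives as the input scaled value 2.5 * 256 == 640.
 * The overflow caveat above is satisfied in this file: GRID_X == 256 and
 * GRID_Y == 15 are both <= 1 << GLITTER_INPUT_BITS. */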
130 typedef int glitter_input_scaled_t;
131 
132 /* Opaque type for scan converting. */
133 typedef struct glitter_scan_converter glitter_scan_converter_t;
134 
135 /*-------------------------------------------------------------------------
136  * glitter-paths.c: Implementation internal types
137  */
138 #include <stdlib.h>
139 #include <string.h>
140 #include <limits.h>
141 
142 /* All polygon coordinates are snapped onto a subsample grid. "Grid
143  * scaled" numbers are fixed precision reals with multiplier GRID_X or
144  * GRID_Y. */
145 typedef int grid_scaled_t;
146 typedef int grid_scaled_x_t;
147 typedef int grid_scaled_y_t;
148 
149 /* Default x/y scale factors.
150  * You can either define GRID_X/Y_BITS to get a power-of-two scale
151  * or define GRID_X/Y separately. */
152 #if !defined(GRID_X) && !defined(GRID_X_BITS)
153 # define GRID_X_BITS 8
154 #endif
155 #if !defined(GRID_Y) && !defined(GRID_Y_BITS)
156 # define GRID_Y 15
157 #endif
158 
159 /* Use GRID_X/Y_BITS to define GRID_X/Y if they're available. */
160 #ifdef GRID_X_BITS
161 # define GRID_X (1 << GRID_X_BITS)
162 #endif
163 #ifdef GRID_Y_BITS
164 # define GRID_Y (1 << GRID_Y_BITS)
165 #endif
166 
167 /* The GRID_X_TO_INT_FRAC macro splits a grid scaled coordinate into
168  * integer and fractional parts. The integer part is floored. */
169 #if defined(GRID_X_TO_INT_FRAC)
170  /* do nothing */
171 #elif defined(GRID_X_BITS)
172 # define GRID_X_TO_INT_FRAC(x, i, f) \
173  _GRID_TO_INT_FRAC_shift(x, i, f, GRID_X_BITS)
174 #else
175 # define GRID_X_TO_INT_FRAC(x, i, f) \
176  _GRID_TO_INT_FRAC_general(x, i, f, GRID_X)
177 #endif
178 
179 #define _GRID_TO_INT_FRAC_general(t, i, f, m) do { \
180  (i) = (t) / (m); \
181  (f) = (t) % (m); \
182  if ((f) < 0) { \
183  --(i); \
184  (f) += (m); \
185  } \
186 } while (0)
187 
188 #define _GRID_TO_INT_FRAC_shift(t, i, f, b) do { \
189  (f) = (t) & ((1 << (b)) - 1); \
190  (i) = (t) >> (b); \
191 } while (0)
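/* For example, with GRID_X_BITS == 8 (so GRID_X == 256):
 *   GRID_X_TO_INT_FRAC(612, i, f) gives i == 2 and f == 100 (612 == 2*256 + 100);
 *   GRID_X_TO_INT_FRAC(-20, i, f) gives i == -1 and f == 236 (-20 == -1*256 + 236);
 * i.e. the integer part is the floor and the fraction always lies in [0, GRID_X). */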
192 
193 /* A grid area is a real in [0,1] scaled by 2*GRID_X*GRID_Y. We want
194  * to be able to represent exactly areas of subpixel trapezoids whose
195  * vertices are given in grid scaled coordinates. The scale factor
196  * comes from needing to accurately represent the area 0.5*dx*dy of a
197  * triangle with base dx and height dy in grid scaled numbers. */
198 typedef int grid_area_t;
199 #define GRID_XY (2*GRID_X*GRID_Y) /* Unit area on the grid. */
200 
201 /* GRID_AREA_TO_ALPHA(area): map [0,GRID_XY] to [0,255]. */
202 #if GRID_XY == 510
203 # define GRID_AREA_TO_ALPHA(c) (((c)+1) >> 1)
204 #elif GRID_XY == 255
205 # define GRID_AREA_TO_ALPHA(c) (c)
206 #elif GRID_XY == 64
207 # define GRID_AREA_TO_ALPHA(c) (((c) << 2) | -(((c) & 0x40) >> 6))
208 #elif GRID_XY == 128
209 # define GRID_AREA_TO_ALPHA(c) ((((c) << 1) | -((c) >> 7)) & 255)
210 #elif GRID_XY == 256
211 # define GRID_AREA_TO_ALPHA(c) (((c) | -((c) >> 8)) & 255)
212 #elif GRID_XY == 15
213 # define GRID_AREA_TO_ALPHA(c) (((c) << 4) + (c))
214 #elif GRID_XY == 2*256*15
215 # define GRID_AREA_TO_ALPHA(c) (((c) + ((c)<<4) + 256) >> 9)
216 #else
217 # define GRID_AREA_TO_ALPHA(c) (((c)*255 + GRID_XY/2) / GRID_XY)
218 #endif
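/* For example, with GRID_X == 256 and GRID_Y == 15 we get GRID_XY == 7680
 * and the 2*256*15 special case above applies: a fully covered pixel,
 * c == 7680, maps to (7680 + 7680*16 + 256) >> 9 == 255, and half coverage,
 * c == 3840, maps to (3840 + 3840*16 + 256) >> 9 == 128. */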
219 
220 #define UNROLL3(x) x x x
221 
222 struct quorem {
223  int32_t quo;
224  int32_t rem;
225 };
226 
227 /* Header for a chunk of memory in a memory pool. */
228 struct _pool_chunk {
229  /* # bytes used in this chunk. */
230  size_t size;
231 
232  /* # bytes total in this chunk */
233  size_t capacity;
234 
235  /* Pointer to the previous chunk or %NULL if this is the sentinel
236  * chunk in the pool header. */
237  struct _pool_chunk *prev_chunk;
238 
239  /* Actual data starts here. Well aligned for pointers. */
240 };
241 
242 /* A memory pool. This is supposed to be embedded on the stack or
243  * within some other structure. It may optionally be followed by an
244  * embedded array from which requests are fulfilled until
245  * malloc needs to be called to allocate a first real chunk. */
246 struct pool {
247  /* Chunk we're allocating from. */
248  struct _pool_chunk *current;
249 
250  jmp_buf *jmp;
251 
252  /* Free list of previously allocated chunks. All have >= default
253  * capacity. */
254  struct _pool_chunk *first_free;
255 
256  /* The default capacity of a chunk. */
257  size_t default_capacity;
258 
259  /* Header for the sentinel chunk. Directly following the pool
260  * struct should be some space for embedded elements from which
261  * the sentinel chunk allocates from. */
262  struct _pool_chunk sentinel[1];
263 };
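/* Typical lifetime of a pool, as the cell list and polygon below use it
 * (default_capacity, embedded_size and jmp are whatever the embedding
 * structure supplies):
 *
 *     pool_init (pool, &jmp, default_capacity, embedded_size);
 *     obj = pool_alloc (pool, size);    - bump allocation, no per-object free
 *     ...
 *     pool_reset (pool);                - recycle every allocation at once
 *     pool_fini (pool);                 - release all malloced chunks
 *
 * On allocation failure the pool does not return NULL; it longjmp()s to
 * *pool->jmp (see _pool_chunk_create() below). */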
264 
265 /* A polygon edge. */
266 struct edge {
267  /* Next in y-bucket or active list. */
268  struct edge *next;
269 
270  /* Current x coordinate while the edge is on the active
271  * list. Initialised to the x coordinate of the top of the
272  * edge. The quotient is in grid_scaled_x_t units and the
273  * remainder is mod dy in grid_scaled_y_t units.*/
274  struct quorem x;
275 
276  /* Advance of the current x when moving down a subsample line. */
277  struct quorem dxdy;
278 
279  /* Advance of the current x when moving down a full pixel
280  * row. Only initialised when the height of the edge is large
281  * enough that there's a chance the edge could be stepped by a
282  * full row's worth of subsample rows at a time. */
283  struct quorem dxdy_full;
284 
285  /* The clipped y of the top of the edge. */
286  grid_scaled_y_t ytop;
287 
288  /* y2-y1 after orienting the edge downwards. */
289  grid_scaled_y_t dy;
290 
291  /* Number of subsample rows remaining to scan convert of this
292  * edge. */
293  grid_scaled_y_t height_left;
294 
295  /* Original sign of the edge: +1 for downwards, -1 for upwards
296  * edges. */
297  int dir;
298  int vertical;
299  int clip;
300 };
301 
302 /* Number of subsample rows per y-bucket. Must be GRID_Y. */
303 #define EDGE_Y_BUCKET_HEIGHT GRID_Y
304 
305 #define EDGE_Y_BUCKET_INDEX(y, ymin) (((y) - (ymin))/EDGE_Y_BUCKET_HEIGHT)
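/* For example, with GRID_Y == 15 and ymin == 0, an edge whose clipped top
 * lies on subsample row 47 is filed in bucket (47 - 0)/15 == 3, i.e. with
 * the other edges that start somewhere in the fourth pixel row. */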
306 
307 /* A collection of sorted and vertically clipped edges of the polygon.
308  * Edges are moved from the polygon to an active list while scan
309  * converting. */
310 struct polygon {
311  /* The vertical clip extents. */
312  grid_scaled_y_t ymin, ymax;
313 
314  /* Array of edges all starting in the same bucket. An edge is put
315  * into bucket EDGE_BUCKET_INDEX(edge->ytop, polygon->ymin) when
316  * it is added to the polygon. */
317  struct edge **y_buckets;
318  struct edge *y_buckets_embedded[64];
319 
320  struct {
321  struct pool base[1];
322  struct edge embedded[32];
323  } edge_pool;
324 };
325 
326 /* A cell records the effect on pixel coverage of polygon edges
327  * passing through a pixel. It contains two accumulators of pixel
328  * coverage.
329  *
330  * Consider the effects of a polygon edge on the coverage of a pixel
331  * it intersects and that of the following one. The coverage of the
332  * following pixel is the height of the edge multiplied by the width
333  * of the pixel, and the coverage of the pixel itself is the area of
334  * the trapezoid formed by the edge and the right side of the pixel.
335  *
336  * +-----------------------+-----------------------+
337  * | | |
338  * | | |
339  * |_______________________|_______________________|
340  * | \...................|.......................|\
341  * | \..................|.......................| |
342  * | \.................|.......................| |
343  * | \....covered.....|.......................| |
344  * | \....area.......|.......................| } covered height
345  * | \..............|.......................| |
346  * |uncovered\.............|.......................| |
347  * | area \............|.......................| |
348  * |___________\...........|.......................|/
349  * | | |
350  * | | |
351  * | | |
352  * +-----------------------+-----------------------+
353  *
354  * Since the coverage of the following pixel will always be a multiple
355  * of the width of the pixel, we can store the height of the covered
356  * area instead. The coverage of the pixel itself is the total
357  * coverage minus the area of the uncovered area to the left of the
358  * edge. As it's faster to compute the uncovered area we only store
359  * that and subtract it from the total coverage later when forming
360  * spans to blit.
361  *
362  * The heights and areas are signed, with left edges of the polygon
363  * having positive sign and right edges having negative sign. When
364  * two edges intersect they swap their left/rightness so their
365  * contribution above and below the intersection point must be
366  * computed separately. */
367 struct cell {
368  struct cell *next;
369  int x;
370  grid_area_t uncovered_area;
371  grid_scaled_y_t covered_height;
372  grid_scaled_y_t clipped_height;
373 };
374 
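/* As a worked example, a vertical left (+1) edge crossing the full height
 * of a pixel row one quarter pixel in from the left (fx == 64 with
 * GRID_X == 256) adds covered_height == 15 and
 * uncovered_area == 2*64*15 == 1920 to its cell.  When spans are formed the
 * accumulated cover for that pixel is 15*2*256 == 7680 == GRID_XY, so its
 * own area is 7680 - 1920 == 5760 (75% coverage) while the pixels to its
 * right, up to the matching -1 edge, receive the full 7680. */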
375 /* A cell list represents the scan line sparsely as cells ordered by
376  * ascending x. It is geared towards scanning the cells in order
377  * using an internal cursor. */
378 struct cell_list {
379  /* Sentinel nodes */
380  struct cell head, tail;
381 
382  /* Cursor state for iterating through the cell list. */
383  struct cell *cursor;
384 
385  /* Cells in the cell list are owned by the cell list and are
386  * allocated from this pool. */
387  struct {
388  struct pool base[1];
389  struct cell embedded[32];
390  } cell_pool;
391 };
392 
393 struct cell_pair {
394  struct cell *cell1;
395  struct cell *cell2;
396 };
397 
398 /* The active list contains edges in the current scan line ordered by
399  * the x-coordinate of the intercept of the edge and the scan line. */
400 struct active_list {
401  /* Leftmost edge on the current scan line. */
402  struct edge *head;
403 
404  /* A lower bound on the height of the active edges is used to
405  * estimate how soon some active edge ends. We can't advance the
406  * scan conversion by a full pixel row if an edge ends somewhere
407  * within it. */
408  grid_scaled_y_t min_height;
409 };
410 
411 struct glitter_scan_converter {
412  struct polygon polygon[1];
413  struct active_list active[1];
414  struct cell_list coverages[1];
415 
416  /* Clip box. */
417  grid_scaled_y_t ymin, ymax;
418 };
419 
420 /* Compute the floored division a/b. Assumes / and % perform symmetric
421  * division. */
422 inline static struct quorem
423 floored_divrem (int a, int b)
424 {
425  struct quorem qr;
426  qr.quo = a/b;
427  qr.rem = a%b;
428  if ((a^b)<0 && qr.rem) {
429  qr.quo -= 1;
430  qr.rem += b;
431  }
432  return qr;
433 }
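/* For example, floored_divrem(-7, 3): the symmetric division gives
 * quo == -2, rem == -1; since a and b differ in sign and rem != 0 the
 * result is adjusted to quo == -3, rem == 2, i.e. -7 == (-3)*3 + 2 with
 * 0 <= rem < b. */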
434 
435 /* Compute the floored division (x*a)/b. Assumes / and % perform symmetric
436  * division. */
437 static struct quorem
438 floored_muldivrem (int x, int a, int b)
439 {
440  struct quorem qr;
441  long long xa = (long long)x*a;
442  qr.quo = xa/b;
443  qr.rem = xa%b;
444  if ((xa>=0) != (b>=0) && qr.rem) {
445  qr.quo -= 1;
446  qr.rem += b;
447  }
448  return qr;
449 }
450 
451 static struct _pool_chunk *
452 _pool_chunk_init (
453  struct _pool_chunk *p,
454  struct _pool_chunk *prev_chunk,
455  size_t capacity)
456 {
457  p->prev_chunk = prev_chunk;
458  p->size = 0;
459  p->capacity = capacity;
460  return p;
461 }
462 
463 static struct _pool_chunk *
464 _pool_chunk_create (struct pool *pool, size_t size)
465 {
466  struct _pool_chunk *p;
467 
468  p = _cairo_malloc (size + sizeof(struct _pool_chunk));
469  if (unlikely (NULL == p))
470  longjmp (*pool->jmp, _cairo_error (CAIRO_STATUS_NO_MEMORY));
471 
472  return _pool_chunk_init(p, pool->current, size);
473 }
474 
475 static void
476 pool_init (struct pool *pool,
477  jmp_buf *jmp,
478  size_t default_capacity,
479  size_t embedded_capacity)
480 {
481  pool->jmp = jmp;
482  pool->current = pool->sentinel;
483  pool->first_free = NULL;
484  pool->default_capacity = default_capacity;
485  _pool_chunk_init(pool->sentinel, NULL, embedded_capacity);
486 }
487 
488 static void
489 pool_fini (struct pool *pool)
490 {
491  struct _pool_chunk *p = pool->current;
492  do {
493  while (NULL != p) {
494  struct _pool_chunk *prev = p->prev_chunk;
495  if (p != pool->sentinel)
496  free(p);
497  p = prev;
498  }
499  p = pool->first_free;
500  pool->first_free = NULL;
501  } while (NULL != p);
502 }
503 
504 /* Satisfy an allocation by first allocating a new large enough chunk
505  * and adding it to the head of the pool's chunk list. This function
506  * is called as a fallback if pool_alloc() couldn't do a quick
507  * allocation from the current chunk in the pool. */
508 static void *
509 _pool_alloc_from_new_chunk (
510  struct pool *pool,
511  size_t size)
512 {
513  struct _pool_chunk *chunk;
514  void *obj;
515  size_t capacity;
516 
517  /* If the allocation is smaller than the default chunk size then
518  * try getting a chunk off the free list. Force alloc of a new
519  * chunk for large requests. */
520  capacity = size;
521  chunk = NULL;
522  if (size < pool->default_capacity) {
523  capacity = pool->default_capacity;
524  chunk = pool->first_free;
525  if (chunk) {
526  pool->first_free = chunk->prev_chunk;
527  _pool_chunk_init(chunk, pool->current, chunk->capacity);
528  }
529  }
530 
531  if (NULL == chunk)
532  chunk = _pool_chunk_create (pool, capacity);
533  pool->current = chunk;
534 
535  obj = ((unsigned char*)chunk + sizeof(*chunk) + chunk->size);
536  chunk->size += size;
537  return obj;
538 }
539 
540 /* Allocate size bytes from the pool. The first allocated address
541  * returned from a pool is aligned to sizeof(void*). Subsequent
542  * addresses will maintain alignment as long as multiples of void* are
543  * allocated. Returns the address of a new memory area; on allocation
544  * failure it does not return but longjmps to the pool's jmp_buf. The
545  * pool retains ownership of the returned memory. */
546 inline static void *
547 pool_alloc (struct pool *pool, size_t size)
548 {
549  struct _pool_chunk *chunk = pool->current;
550 
551  if (size <= chunk->capacity - chunk->size) {
552  void *obj = ((unsigned char*)chunk + sizeof(*chunk) + chunk->size);
553  chunk->size += size;
554  return obj;
555  } else {
556  return _pool_alloc_from_new_chunk(pool, size);
557  }
558 }
559 
560 /* Relinquish all pool_alloced memory back to the pool. */
561 static void
562 pool_reset (struct pool *pool)
563 {
564  /* Transfer all used chunks to the chunk free list. */
565  struct _pool_chunk *chunk = pool->current;
566  if (chunk != pool->sentinel) {
567  while (chunk->prev_chunk != pool->sentinel) {
568  chunk = chunk->prev_chunk;
569  }
570  chunk->prev_chunk = pool->first_free;
571  pool->first_free = pool->current;
572  }
573  /* Reset the sentinel as the current chunk. */
574  pool->current = pool->sentinel;
575  pool->sentinel->size = 0;
576 }
577 
578 /* Rewinds the cell list's cursor to the beginning. After rewinding
579  * we're good to cell_list_find() the cell for any x coordinate. */
580 inline static void
581 cell_list_rewind (struct cell_list *cells)
582 {
583  cells->cursor = &cells->head;
584 }
585 
586 /* Rewind the cell list if its cursor has been advanced past x. */
587 inline static void
588 cell_list_maybe_rewind (struct cell_list *cells, int x)
589 {
590  struct cell *tail = cells->cursor;
591  if (tail->x > x)
592  cell_list_rewind (cells);
593 }
594 
595 static void
596 cell_list_init(struct cell_list *cells, jmp_buf *jmp)
597 {
598  pool_init(cells->cell_pool.base, jmp,
599  256*sizeof(struct cell),
600  sizeof(cells->cell_pool.embedded));
601  cells->tail.next = NULL;
602  cells->tail.x = INT_MAX;
603  cells->head.x = INT_MIN;
604  cells->head.next = &cells->tail;
605  cell_list_rewind (cells);
606 }
607 
608 static void
609 cell_list_fini(struct cell_list *cells)
610 {
611  pool_fini (cells->cell_pool.base);
612 }
613 
614 /* Empty the cell list. This is called at the start of every pixel
615  * row. */
616 inline static void
617 cell_list_reset (struct cell_list *cells)
618 {
619  cell_list_rewind (cells);
620  cells->head.next = &cells->tail;
621  pool_reset (cells->cell_pool.base);
622 }
623 
624 static struct cell *
625 cell_list_alloc (struct cell_list *cells,
626  struct cell *tail,
627  int x)
628 {
629  struct cell *cell;
630 
631  cell = pool_alloc (cells->cell_pool.base, sizeof (struct cell));
632  cell->next = tail->next;
633  tail->next = cell;
634  cell->x = x;
635  cell->uncovered_area = 0;
636  cell->covered_height = 0;
637  cell->clipped_height = 0;
638  return cell;
639 }
640 
641 /* Find a cell at the given x-coordinate. Returns %NULL if a new cell
642  * needed to be allocated but couldn't be. Cells must be found with
643  * non-decreasing x-coordinate until the cell list is rewound using
644  * cell_list_rewind(). Ownership of the returned cell is retained by
645  * the cell list. */
646 inline static struct cell *
647 cell_list_find (struct cell_list *cells, int x)
648 {
649  struct cell *tail = cells->cursor;
650 
651  while (1) {
652  UNROLL3({
653  if (tail->next->x > x)
654  break;
655  tail = tail->next;
656  });
657  }
658 
659  if (tail->x != x)
660  tail = cell_list_alloc (cells, tail, x);
661  return cells->cursor = tail;
662 
663 }
664 
665 /* Find two cells at x1 and x2. This is exactly equivalent
666  * to
667  *
668  * pair.cell1 = cell_list_find(cells, x1);
669  * pair.cell2 = cell_list_find(cells, x2);
670  *
671  * except with less function call overhead. */
672 inline static struct cell_pair
673 cell_list_find_pair(struct cell_list *cells, int x1, int x2)
674 {
675  struct cell_pair pair;
676 
677  pair.cell1 = cells->cursor;
678  while (1) {
679  UNROLL3({
680  if (pair.cell1->next->x > x1)
681  break;
682  pair.cell1 = pair.cell1->next;
683  });
684  }
685  if (pair.cell1->x != x1) {
686  struct cell *cell = pool_alloc (cells->cell_pool.base,
687  sizeof (struct cell));
688  cell->x = x1;
689  cell->uncovered_area = 0;
690  cell->covered_height = 0;
691  cell->clipped_height = 0;
692  cell->next = pair.cell1->next;
693  pair.cell1->next = cell;
694  pair.cell1 = cell;
695  }
696 
697  pair.cell2 = pair.cell1;
698  while (1) {
699  UNROLL3({
700  if (pair.cell2->next->x > x2)
701  break;
702  pair.cell2 = pair.cell2->next;
703  });
704  }
705  if (pair.cell2->x != x2) {
706  struct cell *cell = pool_alloc (cells->cell_pool.base,
707  sizeof (struct cell));
708  cell->uncovered_area = 0;
709  cell->covered_height = 0;
710  cell->clipped_height = 0;
711  cell->x = x2;
712  cell->next = pair.cell2->next;
713  pair.cell2->next = cell;
714  pair.cell2 = cell;
715  }
716 
717  cells->cursor = pair.cell2;
718  return pair;
719 }
720 
721 /* Add a subpixel span covering [x1, x2) to the coverage cells. */
722 inline static void
723 cell_list_add_subspan (struct cell_list *cells,
724  grid_scaled_x_t x1,
725  grid_scaled_x_t x2)
726 {
727  int ix1, fx1;
728  int ix2, fx2;
729 
730  GRID_X_TO_INT_FRAC(x1, ix1, fx1);
731  GRID_X_TO_INT_FRAC(x2, ix2, fx2);
732 
733  if (ix1 != ix2) {
734  struct cell_pair p;
735  p = cell_list_find_pair(cells, ix1, ix2);
736  p.cell1->uncovered_area += 2*fx1;
737  ++p.cell1->covered_height;
738  p.cell2->uncovered_area -= 2*fx2;
739  --p.cell2->covered_height;
740  } else {
741  struct cell *cell = cell_list_find(cells, ix1);
742  cell->uncovered_area += 2*(fx1-fx2);
743  }
744 }
745 
746 /* Adds the analytical coverage of an edge crossing the current pixel
747  * row to the coverage cells and advances the edge's x position to the
748  * following row.
749  *
750  * This function is only called when we know that during this pixel row:
751  *
752  * 1) The relative order of all edges on the active list doesn't
753  * change. In particular, no edges intersect within this row to pixel
754  * precision.
755  *
756  * 2) No new edges start in this row.
757  *
758  * 3) No existing edges end mid-row.
759  *
760  * This function depends on being called with all edges from the
761  * active list in the order they appear on the list (i.e. with
762  * non-decreasing x-coordinate.) */
763 static void
764 cell_list_render_edge (
765  struct cell_list *cells,
766  struct edge *edge,
767  int sign)
768 {
769  grid_scaled_y_t y1, y2, dy;
770  grid_scaled_x_t dx;
771  int ix1, ix2;
772  grid_scaled_x_t fx1, fx2;
773 
774  struct quorem x1 = edge->x;
775  struct quorem x2 = x1;
776 
777  if (! edge->vertical) {
778  x2.quo += edge->dxdy_full.quo;
779  x2.rem += edge->dxdy_full.rem;
780  if (x2.rem >= 0) {
781  ++x2.quo;
782  x2.rem -= edge->dy;
783  }
784 
785  edge->x = x2;
786  }
787 
788  GRID_X_TO_INT_FRAC(x1.quo, ix1, fx1);
789  GRID_X_TO_INT_FRAC(x2.quo, ix2, fx2);
790 
791  /* Edge is entirely within a column? */
792  if (ix1 == ix2) {
793  /* We always know that ix1 is >= the cell list cursor in this
794  * case due to the no-intersections precondition. */
795  struct cell *cell = cell_list_find(cells, ix1);
796  cell->covered_height += sign*GRID_Y;
797  cell->uncovered_area += sign*(fx1 + fx2)*GRID_Y;
798  return;
799  }
800 
801  /* Orient the edge left-to-right. */
802  dx = x2.quo - x1.quo;
803  if (dx >= 0) {
804  y1 = 0;
805  y2 = GRID_Y;
806  } else {
807  int tmp;
808  tmp = ix1; ix1 = ix2; ix2 = tmp;
809  tmp = fx1; fx1 = fx2; fx2 = tmp;
810  dx = -dx;
811  sign = -sign;
812  y1 = GRID_Y;
813  y2 = 0;
814  }
815  dy = y2 - y1;
816 
817  /* Add coverage for all pixels [ix1,ix2] on this row crossed
818  * by the edge. */
819  {
820  struct cell_pair pair;
821  struct quorem y = floored_divrem((GRID_X - fx1)*dy, dx);
822 
823  /* When rendering a previous edge on the active list we may
824  * advance the cell list cursor past the leftmost pixel of the
825  * current edge even though the two edges don't intersect.
826  * e.g. consider two edges going down and rightwards:
827  *
828  * --\_+---\_+-----+-----+----
829  * \_ \_ | |
830  * | \_ | \_ | |
831  * | \_| \_| |
832  * | \_ \_ |
833  * ----+-----+-\---+-\---+----
834  *
835  * The left edge touches cells past the starting cell of the
836  * right edge. Fortunately such cases are rare.
837  *
838  * The rewinding is never necessary if the current edge stays
839  * within a single column because we've checked before calling
840  * this function that the active list order won't change. */
841  cell_list_maybe_rewind(cells, ix1);
842 
843  pair = cell_list_find_pair(cells, ix1, ix1+1);
844  pair.cell1->uncovered_area += sign*y.quo*(GRID_X + fx1);
845  pair.cell1->covered_height += sign*y.quo;
846  y.quo += y1;
847 
848  if (ix1+1 < ix2) {
849  struct quorem dydx_full = floored_divrem(GRID_X*dy, dx);
850  struct cell *cell = pair.cell2;
851 
852  ++ix1;
853  do {
854  grid_scaled_y_t y_skip = dydx_full.quo;
855  y.rem += dydx_full.rem;
856  if (y.rem >= dx) {
857  ++y_skip;
858  y.rem -= dx;
859  }
860 
861  y.quo += y_skip;
862 
863  y_skip *= sign;
864  cell->uncovered_area += y_skip*GRID_X;
865  cell->covered_height += y_skip;
866 
867  ++ix1;
868  cell = cell_list_find(cells, ix1);
869  } while (ix1 != ix2);
870 
871  pair.cell2 = cell;
872  }
873  pair.cell2->uncovered_area += sign*(y2 - y.quo)*fx2;
874  pair.cell2->covered_height += sign*(y2 - y.quo);
875  }
876 }
877 
878 static void
879 polygon_init (struct polygon *polygon, jmp_buf *jmp)
880 {
881  polygon->ymin = polygon->ymax = 0;
882  polygon->y_buckets = polygon->y_buckets_embedded;
883  pool_init (polygon->edge_pool.base, jmp,
884  8192 - sizeof (struct _pool_chunk),
885  sizeof (polygon->edge_pool.embedded));
886 }
887 
888 static void
889 polygon_fini (struct polygon *polygon)
890 {
891  if (polygon->y_buckets != polygon->y_buckets_embedded)
892  free (polygon->y_buckets);
893 
894  pool_fini (polygon->edge_pool.base);
895 
896 
897 /* Empties the polygon of all edges. The polygon is then prepared to
898  * receive new edges and clip them to the vertical range
899  * [ymin,ymax). */
900 static cairo_status_t
901 polygon_reset (struct polygon *polygon,
902  grid_scaled_y_t ymin,
903  grid_scaled_y_t ymax)
904 {
905  unsigned h = ymax - ymin;
906  unsigned num_buckets = EDGE_Y_BUCKET_INDEX(ymax + EDGE_Y_BUCKET_HEIGHT-1,
907  ymin);
908 
910 
911  if (unlikely (h > 0x7FFFFFFFU - EDGE_Y_BUCKET_HEIGHT))
912  goto bail_no_mem; /* even if you could, you wouldn't want to. */
913 
916 
917  polygon->y_buckets = polygon->y_buckets_embedded;
918  if (num_buckets > ARRAY_LENGTH (polygon->y_buckets_embedded)) {
919  polygon->y_buckets = _cairo_malloc_ab (num_buckets,
920  sizeof (struct edge *));
921  if (unlikely (NULL == polygon->y_buckets))
922  goto bail_no_mem;
923  }
924  memset (polygon->y_buckets, 0, num_buckets * sizeof (struct edge *));
925 
926  polygon->ymin = ymin;
927  polygon->ymax = ymax;
928  return CAIRO_STATUS_SUCCESS;
929 
930  bail_no_mem:
931  polygon->ymin = 0;
932  polygon->ymax = 0;
933  return CAIRO_STATUS_NO_MEMORY;
934 }
935 
936 static void
937 _polygon_insert_edge_into_its_y_bucket (
938  struct polygon *polygon,
939  struct edge *e)
940 {
941  unsigned ix = EDGE_Y_BUCKET_INDEX(e->ytop, polygon->ymin);
942  struct edge **ptail = &polygon->y_buckets[ix];
943  e->next = *ptail;
944  *ptail = e;
945 }
946 
947 inline static void
948 polygon_add_edge (struct polygon *polygon,
949  const cairo_edge_t *edge,
950  int clip)
951 {
952  struct edge *e;
953  grid_scaled_x_t dx;
954  grid_scaled_y_t dy;
955  grid_scaled_y_t ytop, ybot;
956  grid_scaled_y_t ymin = polygon->ymin;
957  grid_scaled_y_t ymax = polygon->ymax;
958 
959  assert (edge->bottom > edge->top);
960 
961  if (unlikely (edge->top >= ymax || edge->bottom <= ymin))
962  return;
963 
964  e = pool_alloc (polygon->edge_pool.base, sizeof (struct edge));
965 
966  dx = edge->line.p2.x - edge->line.p1.x;
967  dy = edge->line.p2.y - edge->line.p1.y;
968  e->dy = dy;
969  e->dir = edge->dir;
970  e->clip = clip;
971 
972  ytop = edge->top >= ymin ? edge->top : ymin;
973  ybot = edge->bottom <= ymax ? edge->bottom : ymax;
974  e->ytop = ytop;
975  e->height_left = ybot - ytop;
976 
977  if (dx == 0) {
978  e->vertical = TRUE;
979  e->x.quo = edge->line.p1.x;
980  e->x.rem = 0;
981  e->dxdy.quo = 0;
982  e->dxdy.rem = 0;
983  e->dxdy_full.quo = 0;
984  e->dxdy_full.rem = 0;
985  } else {
986  e->vertical = FALSE;
987  e->dxdy = floored_divrem (dx, dy);
988  if (ytop == edge->line.p1.y) {
989  e->x.quo = edge->line.p1.x;
990  e->x.rem = 0;
991  } else {
992  e->x = floored_muldivrem (ytop - edge->line.p1.y, dx, dy);
993  e->x.quo += edge->line.p1.x;
994  }
995 
996  if (e->height_left >= GRID_Y) {
997  e->dxdy_full = floored_muldivrem (GRID_Y, dx, dy);
998  } else {
999  e->dxdy_full.quo = 0;
1000  e->dxdy_full.rem = 0;
1001  }
1002  }
1003 
1004  _polygon_insert_edge_into_its_y_bucket (polygon, e);
1005 
1006  e->x.rem -= dy; /* Bias the remainder for faster
1007  * edge advancement. */
1008 }
1009 
1010 static void
1011 active_list_reset (struct active_list *active)
1012 {
1013  active->head = NULL;
1014  active->min_height = 0;
1015 }
1016 
1017 static void
1018 active_list_init (struct active_list *active)
1019 {
1020  active_list_reset (active);
1021 }
1022 
1023 /*
1024  * Merge two sorted edge lists.
1025  * Input:
1026  * - head_a: The head of the first list.
1027  * - head_b: The head of the second list; head_b cannot be NULL.
1028  * Output:
1029  * Returns the head of the merged list.
1030  *
1031  * Implementation notes:
1032  * To make it fast (in particular, to reduce to an insertion sort whenever
1033  * one of the two input lists only has a single element) we iterate through
1034  * a list until its head becomes greater than the head of the other list,
1035  * then we switch their roles. As soon as one of the two lists is empty, we
1036  * just attach the other one to the current list and exit.
1037  * Writes to memory are only needed to "switch" lists (as it also requires
1038  * attaching to the output list the list which we will be iterating next) and
1039  * to attach the last non-empty list.
1040  */
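/* For example, merging the x.quo sequences A == (1, 4, 5) and B == (2, 3, 9):
 * A is consumed while its head is <= 2 (emitting 1), then B while its head
 * is <= 4 (emitting 2, 3), then A again (4, 5), and finally the remainder of
 * B (9) is attached, producing (1, 2, 3, 4, 5, 9) with a pointer write only
 * at each of the three switch points. */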
1041 static struct edge *
1042 merge_sorted_edges (struct edge *head_a, struct edge *head_b)
1043 {
1044  struct edge *head, **next;
1045  int32_t x;
1046 
1047  if (head_a == NULL)
1048  return head_b;
1049 
1050  next = &head;
1051  if (head_a->x.quo <= head_b->x.quo) {
1052  head = head_a;
1053  } else {
1054  head = head_b;
1055  goto start_with_b;
1056  }
1057 
1058  do {
1059  x = head_b->x.quo;
1060  while (head_a != NULL && head_a->x.quo <= x) {
1061  next = &head_a->next;
1062  head_a = head_a->next;
1063  }
1064 
1065  *next = head_b;
1066  if (head_a == NULL)
1067  return head;
1068 
1069 start_with_b:
1070  x = head_a->x.quo;
1071  while (head_b != NULL && head_b->x.quo <= x) {
1072  next = &head_b->next;
1073  head_b = head_b->next;
1074  }
1075 
1076  *next = head_a;
1077  if (head_b == NULL)
1078  return head;
1079  } while (1);
1080 }
1081 
1082 /*
1083  * Sort (part of) a list.
1084  * Input:
1085  * - list: The list to be sorted; list cannot be NULL.
1086  * - limit: Recursion limit.
1087  * Output:
1088  * - head_out: The head of the sorted list containing the first 2^(level+1) elements of the
1089  * input list; if the input list has fewer elements, head_out will be a sorted list
1090  * containing all the elements of the input list.
1091  * Returns the head of the list of unprocessed elements (NULL if the sorted list contains
1092  * all the elements of the input list).
1093  *
1094  * Implementation notes:
1095  * Special case single element list, unroll/inline the sorting of the first two elements.
1096  * Some tail recursion is used since we iterate on the bottom-up solution of the problem
1097  * (we start with a small sorted list and keep merging other lists of the same size to it).
1098  */
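/* For example, a five element list with level == UINT_MAX is handled as
 * follows: the first two elements are sorted inline, iteration i == 0
 * extracts and merges another sorted pair (four elements sorted), iteration
 * i == 1 extracts the single remaining element and merges it, and the loop
 * then stops because nothing remains, so the function returns NULL with a
 * fully sorted *head_out. */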
1099 static struct edge *
1100 sort_edges (struct edge *list,
1101  unsigned int level,
1102  struct edge **head_out)
1103 {
1104  struct edge *head_other, *remaining;
1105  unsigned int i;
1106 
1107  head_other = list->next;
1108 
1109  /* Single element list -> return */
1110  if (head_other == NULL) {
1111  *head_out = list;
1112  return NULL;
1113  }
1114 
1115  /* Unroll the first iteration of the following loop (halves the number of calls to merge_sorted_edges):
1116  * - Initialize remaining to be the list containing the elements after the second in the input list.
1117  * - Initialize *head_out to be the sorted list containing the first two element.
1118  */
1119  remaining = head_other->next;
1120  if (list->x.quo <= head_other->x.quo) {
1121  *head_out = list;
1122  /* list->next = head_other; */ /* The input list is already like this. */
1123  head_other->next = NULL;
1124  } else {
1125  *head_out = head_other;
1126  head_other->next = list;
1127  list->next = NULL;
1128  }
1129 
1130  for (i = 0; i < level && remaining; i++) {
1131  /* Extract a sorted list of the same size as *head_out
1132  * (2^(i+1) elements) from the list of remaining elements. */
1133  remaining = sort_edges (remaining, i, &head_other);
1134  *head_out = merge_sorted_edges (*head_out, head_other);
1135  }
1136 
1137  /* *head_out now contains (at most) 2^(level+1) elements. */
1138 
1139  return remaining;
1140 }
1141 
1142 /* Test if the edges on the active list can be safely advanced by a
1143  * full row without intersections or any edges ending. */
1144 inline static int
1145 active_list_can_step_full_row (struct active_list *active)
1146 {
1147  const struct edge *e;
1148  int prev_x = INT_MIN;
1149 
1150  /* Recomputes the minimum height of all edges on the active
1151  * list if we have been dropping edges. */
1152  if (active->min_height <= 0) {
1153  int min_height = INT_MAX;
1154 
1155  e = active->head;
1156  while (NULL != e) {
1157  if (e->height_left < min_height)
1158  min_height = e->height_left;
1159  e = e->next;
1160  }
1161 
1162  active->min_height = min_height;
1163  }
1164 
1165  if (active->min_height < GRID_Y)
1166  return 0;
1167 
1168  /* Check for intersections as no edges end during the next row. */
1169  e = active->head;
1170  while (NULL != e) {
1171  struct quorem x = e->x;
1172 
1173  if (! e->vertical) {
1174  x.quo += e->dxdy_full.quo;
1175  x.rem += e->dxdy_full.rem;
1176  if (x.rem >= 0)
1177  ++x.quo;
1178  }
1179 
1180  if (x.quo <= prev_x)
1181  return 0;
1182 
1183  prev_x = x.quo;
1184  e = e->next;
1185  }
1186 
1187  return 1;
1188 }
1189 
1190 /* Merges edges on the given subpixel row from the polygon to the
1191  * active_list. */
1192 inline static void
1193 active_list_merge_edges_from_polygon (struct active_list *active,
1194  struct edge **ptail,
1195  grid_scaled_y_t y,
1196  struct polygon *polygon)
1197 {
1198  /* Split off the edges on the current subrow and merge them into
1199  * the active list. */
1200  int min_height = active->min_height;
1201  struct edge *subrow_edges = NULL;
1202  struct edge *tail = *ptail;
1203 
1204  do {
1205  struct edge *next = tail->next;
1206 
1207  if (y == tail->ytop) {
1208  tail->next = subrow_edges;
1209  subrow_edges = tail;
1210 
1211  if (tail->height_left < min_height)
1212  min_height = tail->height_left;
1213 
1214  *ptail = next;
1215  } else
1216  ptail = &tail->next;
1217 
1218  tail = next;
1219  } while (tail);
1220 
1221  if (subrow_edges) {
1222  sort_edges (subrow_edges, UINT_MAX, &subrow_edges);
1223  active->head = merge_sorted_edges (active->head, subrow_edges);
1224  active->min_height = min_height;
1225  }
1226 }
1227 
1228 /* Advance the edges on the active list by one subsample row by
1229  * updating their x positions. Drop edges from the list that end. */
1230 inline static void
1231 active_list_substep_edges (struct active_list *active)
1232 {
1233  struct edge **cursor = &active->head;
1234  grid_scaled_x_t prev_x = INT_MIN;
1235  struct edge *unsorted = NULL;
1236  struct edge *edge = *cursor;
1237 
1238  do {
1239  UNROLL3({
1240  struct edge *next;
1241 
1242  if (NULL == edge)
1243  break;
1244 
1245  next = edge->next;
1246  if (--edge->height_left) {
1247  edge->x.quo += edge->dxdy.quo;
1248  edge->x.rem += edge->dxdy.rem;
1249  if (edge->x.rem >= 0) {
1250  ++edge->x.quo;
1251  edge->x.rem -= edge->dy;
1252  }
1253 
1254  if (edge->x.quo < prev_x) {
1255  *cursor = next;
1256  edge->next = unsorted;
1257  unsorted = edge;
1258  } else {
1259  prev_x = edge->x.quo;
1260  cursor = &edge->next;
1261  }
1262  } else {
1263  *cursor = next;
1264  }
1265  edge = next;
1266  })
1267  } while (1);
1268 
1269  if (unsorted) {
1270  sort_edges (unsorted, UINT_MAX, &unsorted);
1271  active->head = merge_sorted_edges (active->head, unsorted);
1272  }
1273 }
1274 
1275 inline static void
1276 apply_nonzero_fill_rule_for_subrow (struct active_list *active,
1277  struct cell_list *coverages)
1278 {
1279  struct edge *edge = active->head;
1280  int winding = 0;
1281  int xstart;
1282  int xend;
1283 
1284  cell_list_rewind (coverages);
1285 
1286  while (NULL != edge) {
1287  xstart = edge->x.quo;
1288  winding = edge->dir;
1289  while (1) {
1290  edge = edge->next;
1291  if (NULL == edge) {
1292  ASSERT_NOT_REACHED;
1293  return;
1294  }
1295 
1296  winding += edge->dir;
1297  if (0 == winding) {
1298  if (edge->next == NULL || edge->next->x.quo != edge->x.quo)
1299  break;
1300  }
1301  }
1302 
1303  xend = edge->x.quo;
1304  cell_list_add_subspan (coverages, xstart, xend);
1305 
1306  edge = edge->next;
1307  }
1308 }
1309 
1310 static void
1311 apply_evenodd_fill_rule_for_subrow (struct active_list *active,
1312  struct cell_list *coverages)
1313 {
1314  struct edge *edge = active->head;
1315  int xstart;
1316  int xend;
1317 
1318  cell_list_rewind (coverages);
1319 
1320  while (NULL != edge) {
1321  xstart = edge->x.quo;
1322 
1323  while (1) {
1324  edge = edge->next;
1325  if (NULL == edge) {
1326  ASSERT_NOT_REACHED;
1327  return;
1328  }
1329 
1330  if (edge->next == NULL || edge->next->x.quo != edge->x.quo)
1331  break;
1332 
1333  edge = edge->next;
1334  }
1335 
1336  xend = edge->x.quo;
1337  cell_list_add_subspan (coverages, xstart, xend);
1338 
1339  edge = edge->next;
1340  }
1341 }
1342 
1343 static void
1344 apply_nonzero_fill_rule_and_step_edges (struct active_list *active,
1345  struct cell_list *coverages)
1346 {
1347  struct edge **cursor = &active->head;
1348  struct edge *left_edge;
1349 
1350  left_edge = *cursor;
1351  while (NULL != left_edge) {
1352  struct edge *right_edge;
1353  int winding = left_edge->dir;
1354 
1355  left_edge->height_left -= GRID_Y;
1356  if (left_edge->height_left)
1357  cursor = &left_edge->next;
1358  else
1359  *cursor = left_edge->next;
1360 
1361  while (1) {
1362  right_edge = *cursor;
1363  if (NULL == right_edge) {
1364  cell_list_render_edge (coverages, left_edge, +1);
1365  return;
1366  }
1367 
1368  right_edge->height_left -= GRID_Y;
1369  if (right_edge->height_left)
1370  cursor = &right_edge->next;
1371  else
1372  *cursor = right_edge->next;
1373 
1374  winding += right_edge->dir;
1375  if (0 == winding) {
1376  if (right_edge->next == NULL ||
1377  right_edge->next->x.quo != right_edge->x.quo)
1378  {
1379  break;
1380  }
1381  }
1382 
1383  if (! right_edge->vertical) {
1384  right_edge->x.quo += right_edge->dxdy_full.quo;
1385  right_edge->x.rem += right_edge->dxdy_full.rem;
1386  if (right_edge->x.rem >= 0) {
1387  ++right_edge->x.quo;
1388  right_edge->x.rem -= right_edge->dy;
1389  }
1390  }
1391  }
1392 
1393  cell_list_render_edge (coverages, left_edge, +1);
1394  cell_list_render_edge (coverages, right_edge, -1);
1395 
1396  left_edge = *cursor;
1397  }
1398 }
1399 
1400 static void
1401 apply_evenodd_fill_rule_and_step_edges (struct active_list *active,
1402  struct cell_list *coverages)
1403 {
1404  struct edge **cursor = &active->head;
1405  struct edge *left_edge;
1406 
1407  left_edge = *cursor;
1408  while (NULL != left_edge) {
1409  struct edge *right_edge;
1410 
1411  left_edge->height_left -= GRID_Y;
1412  if (left_edge->height_left)
1413  cursor = &left_edge->next;
1414  else
1415  *cursor = left_edge->next;
1416 
1417  while (1) {
1418  right_edge = *cursor;
1419  if (NULL == right_edge) {
1420  cell_list_render_edge (coverages, left_edge, +1);
1421  return;
1422  }
1423 
1424  right_edge->height_left -= GRID_Y;
1425  if (right_edge->height_left)
1426  cursor = &right_edge->next;
1427  else
1428  *cursor = right_edge->next;
1429 
1430  if (right_edge->next == NULL ||
1431  right_edge->next->x.quo != right_edge->x.quo)
1432  {
1433  break;
1434  }
1435 
1436  if (! right_edge->vertical) {
1437  right_edge->x.quo += right_edge->dxdy_full.quo;
1438  right_edge->x.rem += right_edge->dxdy_full.rem;
1439  if (right_edge->x.rem >= 0) {
1440  ++right_edge->x.quo;
1441  right_edge->x.rem -= right_edge->dy;
1442  }
1443  }
1444  }
1445 
1446  cell_list_render_edge (coverages, left_edge, +1);
1447  cell_list_render_edge (coverages, right_edge, -1);
1448 
1449  left_edge = *cursor;
1450  }
1451 }
1452 
1453 static void
1454 _glitter_scan_converter_init (glitter_scan_converter_t *converter, jmp_buf *jmp)
1455 {
1456  polygon_init(converter->polygon, jmp);
1457  active_list_init(converter->active);
1458  cell_list_init(converter->coverages, jmp);
1459  converter->ymin=0;
1460  converter->ymax=0;
1461 }
1462 
1463 static void
1464 _glitter_scan_converter_fini (glitter_scan_converter_t *converter)
1465 {
1466  polygon_fini(converter->polygon);
1467  cell_list_fini(converter->coverages);
1468  converter->ymin=0;
1469  converter->ymax=0;
1470 }
1471 
1472 static grid_scaled_t
1473 int_to_grid_scaled (int i, int scale)
1474 {
1475  /* Clamp to max/min representable scaled number. */
1476  if (i >= 0) {
1477  if (i >= INT_MAX/scale)
1478  i = INT_MAX/scale;
1479  }
1480  else {
1481  if (i <= INT_MIN/scale)
1482  i = INT_MIN/scale;
1483  }
1484  return i*scale;
1485 }
1486 
1487 #define int_to_grid_scaled_x(x) int_to_grid_scaled((x), GRID_X)
1488 #define int_to_grid_scaled_y(x) int_to_grid_scaled((x), GRID_Y)
1489 
1490 static cairo_status_t
1491 glitter_scan_converter_reset (glitter_scan_converter_t *converter,
1492  int ymin, int ymax)
1493 {
1494  cairo_status_t status;
1495 
1496  converter->ymin = 0;
1497  converter->ymax = 0;
1498 
1499  ymin = int_to_grid_scaled_y(ymin);
1500  ymax = int_to_grid_scaled_y(ymax);
1501 
1502  active_list_reset(converter->active);
1503  cell_list_reset(converter->coverages);
1504  status = polygon_reset(converter->polygon, ymin, ymax);
1505  if (status)
1506  return status;
1507 
1508  converter->ymin = ymin;
1509  converter->ymax = ymax;
1510  return CAIRO_STATUS_SUCCESS;
1511 }
1512 
1513 /* INPUT_TO_GRID_X/Y (in_coord, out_grid_scaled, grid_scale)
1514  * These macros convert an input coordinate in the client's
1515  * device space to the rasterisation grid.
1516  */
1517 /* Gah.. this bit of ugly defines INPUT_TO_GRID_X/Y so as to use
1518  * shifts if possible, and something saneish if not.
1519  */
1520 #if !defined(INPUT_TO_GRID_Y) && defined(GRID_Y_BITS) && GRID_Y_BITS <= GLITTER_INPUT_BITS
1521 # define INPUT_TO_GRID_Y(in, out) (out) = (in) >> (GLITTER_INPUT_BITS - GRID_Y_BITS)
1522 #else
1523 # define INPUT_TO_GRID_Y(in, out) INPUT_TO_GRID_general(in, out, GRID_Y)
1524 #endif
1525 
1526 #if !defined(INPUT_TO_GRID_X) && defined(GRID_X_BITS) && GRID_X_BITS <= GLITTER_INPUT_BITS
1527 # define INPUT_TO_GRID_X(in, out) (out) = (in) >> (GLITTER_INPUT_BITS - GRID_X_BITS)
1528 #else
1529 # define INPUT_TO_GRID_X(in, out) INPUT_TO_GRID_general(in, out, GRID_X)
1530 #endif
1531 
1532 #define INPUT_TO_GRID_general(in, out, grid_scale) do { \
1533  long long tmp__ = (long long)(grid_scale) * (in); \
1534  tmp__ >>= GLITTER_INPUT_BITS; \
1535  (out) = tmp__; \
1536 } while (0)
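/* For example, with GLITTER_INPUT_BITS == 8: INPUT_TO_GRID_X(640, v) uses
 * the shift form (GRID_X_BITS == 8, so the shift is by 0) and yields 640,
 * whereas INPUT_TO_GRID_Y(640, v) must use the general form because
 * GRID_Y == 15 is not a power of two, yielding (15 * 640) >> 8 == 37. */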
1537 
1538 static void
1539 glitter_scan_converter_add_edge (glitter_scan_converter_t *converter,
1540  const cairo_edge_t *edge,
1541  int clip)
1542 {
1543  cairo_edge_t e;
1544 
1545  INPUT_TO_GRID_Y (edge->top, e.top);
1546  INPUT_TO_GRID_Y (edge->bottom, e.bottom);
1547  if (e.top >= e.bottom)
1548  return;
1549 
1550  /* XXX: possible overflows if GRID_X/Y > 2**GLITTER_INPUT_BITS */
1551  INPUT_TO_GRID_Y (edge->line.p1.y, e.line.p1.y);
1552  INPUT_TO_GRID_Y (edge->line.p2.y, e.line.p2.y);
1553  if (e.line.p1.y == e.line.p2.y)
1554  return;
1555 
1556  INPUT_TO_GRID_X (edge->line.p1.x, e.line.p1.x);
1557  INPUT_TO_GRID_X (edge->line.p2.x, e.line.p2.x);
1558 
1559  e.dir = edge->dir;
1560 
1561  polygon_add_edge (converter->polygon, &e, clip);
1562 }
1563 
1564 static cairo_bool_t
1565 active_list_is_vertical (struct active_list *active)
1566 {
1567  struct edge *e;
1568 
1569  for (e = active->head; e != NULL; e = e->next) {
1570  if (! e->vertical)
1571  return FALSE;
1572  }
1573 
1574  return TRUE;
1575 }
1576 
1577 static void
1578 step_edges (struct active_list *active, int count)
1579 {
1580  struct edge **cursor = &active->head;
1581  struct edge *edge;
1582 
1583  for (edge = *cursor; edge != NULL; edge = *cursor) {
1584  edge->height_left -= GRID_Y * count;
1585  if (edge->height_left)
1586  cursor = &edge->next;
1587  else
1588  *cursor = edge->next;
1589  }
1590 }
1591 
1592 static cairo_status_t
1593 blit_coverages (struct cell_list *cells,
1594  cairo_span_renderer_t *renderer,
1595  struct pool *span_pool,
1596  int y, int height)
1597 {
1598  struct cell *cell = cells->head.next;
1599  int prev_x = -1;
1600  int cover = 0, last_cover = 0;
1601  int clip = 0;
1602  cairo_half_open_span_t *spans;
1603  unsigned num_spans;
1604 
1605  assert (cell != &cells->tail);
1606 
1607  /* Count number of cells remaining. */
1608  {
1609  struct cell *next = cell;
1610  num_spans = 2;
1611  while (next->next) {
1612  next = next->next;
1613  ++num_spans;
1614  }
1615  num_spans = 2*num_spans;
1616  }
1617 
1618  /* Allocate enough spans for the row. */
1619  pool_reset (span_pool);
1620  spans = pool_alloc (span_pool, sizeof(spans[0])*num_spans);
1621  num_spans = 0;
1622 
1623  /* Form the spans from the coverages and areas. */
1624  for (; cell->next; cell = cell->next) {
1625  int x = cell->x;
1626  int area;
1627 
1628  if (x > prev_x && cover != last_cover) {
1629  spans[num_spans].x = prev_x;
1630  spans[num_spans].coverage = GRID_AREA_TO_ALPHA (cover);
1631  spans[num_spans].inverse = 0;
1632  last_cover = cover;
1633  ++num_spans;
1634  }
1635 
1636  cover += cell->covered_height*GRID_X*2;
1637  clip += cell->clipped_height*GRID_X*2;
1638  area = cover - cell->uncovered_area;
1639 
1640  if (area != last_cover) {
1641  spans[num_spans].x = x;
1642  spans[num_spans].coverage = GRID_AREA_TO_ALPHA (area);
1643  spans[num_spans].inverse = 0;
1644  last_cover = area;
1645  ++num_spans;
1646  }
1647 
1648  prev_x = x+1;
1649  }
1650 
1651  /* Dump them into the renderer. */
1652  return renderer->render_rows (renderer, y, height, spans, num_spans);
1653 }
1654 
1655 static void
1656 glitter_scan_converter_render (glitter_scan_converter_t *converter,
1657  int nonzero_fill,
1658  cairo_span_renderer_t *span_renderer,
1659  struct pool *span_pool)
1660 {
1661  int i, j;
1662  int ymax_i = converter->ymax / GRID_Y;
1663  int ymin_i = converter->ymin / GRID_Y;
1664  int h = ymax_i - ymin_i;
1665  struct polygon *polygon = converter->polygon;
1666  struct cell_list *coverages = converter->coverages;
1667  struct active_list *active = converter->active;
1668 
1669  /* Render each pixel row. */
1670  for (i = 0; i < h; i = j) {
1671  int do_full_step = 0;
1672 
1673  j = i + 1;
1674 
1675  /* Determine if we can ignore this row or use the full pixel
1676  * stepper. */
1677  if (GRID_Y == EDGE_Y_BUCKET_HEIGHT && ! polygon->y_buckets[i]) {
1678  if (! active->head) {
1679  for (; j < h && ! polygon->y_buckets[j]; j++)
1680  ;
1681  continue;
1682  }
1683 
1684  do_full_step = active_list_can_step_full_row (active);
1685  }
1686 
1687  if (do_full_step) {
1688  /* Step by a full pixel row's worth. */
1689  if (nonzero_fill)
1690  apply_nonzero_fill_rule_and_step_edges (active, coverages);
1691  else
1692  apply_evenodd_fill_rule_and_step_edges (active, coverages);
1693 
1694  if (active_list_is_vertical (active)) {
1695  while (j < h &&
1696  polygon->y_buckets[j] == NULL &&
1697  active->min_height >= 2*GRID_Y)
1698  {
1699  active->min_height -= GRID_Y;
1700  j++;
1701  }
1702  if (j != i + 1)
1703  step_edges (active, j - (i + 1));
1704  }
1705  } else {
1706  grid_scaled_y_t suby;
1707 
1708  /* Subsample this row. */
1709  for (suby = 0; suby < GRID_Y; suby++) {
1710  grid_scaled_y_t y = (i+ymin_i)*GRID_Y + suby;
1711 
1712  if (polygon->y_buckets[i]) {
1713  active_list_merge_edges_from_polygon (active,
1714  &polygon->y_buckets[i], y,
1715  polygon);
1716  }
1717 
1718  if (nonzero_fill)
1719  apply_nonzero_fill_rule_for_subrow (active, coverages);
1720  else
1721  apply_evenodd_fill_rule_for_subrow (active, coverages);
1722 
1723  active_list_substep_edges (active);
1724 
1725  }
1726 
1727  blit_coverages (coverages, span_renderer, span_pool, i+ymin_i, j -i);
1728  cell_list_reset (coverages);
1729 
1730  if (! active->head)
1731  active->min_height = INT_MAX;
1732  else
1733  active->min_height -= GRID_Y;
1734  }
1735 }
1736 
1737 struct _cairo_clip_tor_scan_converter {
1738  cairo_scan_converter_t base;
1739 
1740  glitter_scan_converter_t converter[1];
1741  cairo_fill_rule_t fill_rule;
1742  cairo_antialias_t antialias;
1743 
1744  cairo_fill_rule_t clip_fill_rule;
1745  cairo_antialias_t clip_antialias;
1746 
1747  jmp_buf jmp;
1748 
1749  struct {
1750  struct pool base[1];
1751  cairo_half_open_span_t embedded[32];
1752  } span_pool;
1753 };
1754 
1755 typedef struct _cairo_clip_tor_scan_converter cairo_clip_tor_scan_converter_t;
1756 
1757 static void
1758 _cairo_clip_tor_scan_converter_destroy (void *converter)
1759 {
1760  cairo_clip_tor_scan_converter_t *self = converter;
1761  if (self == NULL) {
1762  return;
1763  }
1764  _glitter_scan_converter_fini (self->converter);
1765  pool_fini (self->span_pool.base);
1766  free(self);
1767 }
1768 
1769 static cairo_status_t
1770 _cairo_clip_tor_scan_converter_generate (void *converter,
1771  cairo_span_renderer_t *renderer)
1772 {
1773  cairo_clip_tor_scan_converter_t *self = converter;
1774  cairo_status_t status;
1775 
1776  if ((status = setjmp (self->jmp)))
1777  return _cairo_scan_converter_set_error (self, _cairo_error (status));
1778 
1779  glitter_scan_converter_render (self->converter,
1780  self->fill_rule == CAIRO_FILL_RULE_WINDING,
1781  renderer,
1782  self->span_pool.base);
1783  return CAIRO_STATUS_SUCCESS;
1784 }
1785 
1786 cairo_scan_converter_t *
1787 _cairo_clip_tor_scan_converter_create (cairo_clip_t *clip,
1788  cairo_polygon_t *polygon,
1789  cairo_fill_rule_t fill_rule,
1790  cairo_antialias_t antialias)
1791 {
1792  cairo_clip_tor_scan_converter_t *self;
1793  cairo_polygon_t clipper;
1794  cairo_status_t status;
1795  int i;
1796 
1797  self = calloc (1, sizeof(struct _cairo_clip_tor_scan_converter));
1798  if (unlikely (self == NULL)) {
1799  status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
1800  goto bail_nomem;
1801  }
1802 
1803  self->base.destroy = _cairo_clip_tor_scan_converter_destroy;
1804  self->base.generate = _cairo_clip_tor_scan_converter_generate;
1805 
1806  pool_init (self->span_pool.base, &self->jmp,
1807  250 * sizeof(self->span_pool.embedded[0]),
1808  sizeof(self->span_pool.embedded));
1809 
1810  _glitter_scan_converter_init (self->converter, &self->jmp);
1811  status = glitter_scan_converter_reset (self->converter,
1812  clip->extents.y,
1813  clip->extents.y + clip->extents.height);
1814  if (unlikely (status))
1815  goto bail;
1816 
1817  self->fill_rule = fill_rule;
1818  self->antialias = antialias;
1819 
1820  for (i = 0; i < polygon->num_edges; i++)
1821  glitter_scan_converter_add_edge (self->converter,
1822  &polygon->edges[i],
1823  FALSE);
1824 
1825  status = _cairo_clip_get_polygon (clip,
1826  &clipper,
1827  &self->clip_fill_rule,
1828  &self->clip_antialias);
1829  if (unlikely (status))
1830  goto bail;
1831 
1832  for (i = 0; i < clipper.num_edges; i++)
1833  glitter_scan_converter_add_edge (self->converter,
1834  &clipper.edges[i],
1835  TRUE);
1836  _cairo_polygon_fini (&clipper);
1837 
1838  return &self->base;
1839 
1840  bail:
1841  self->base.destroy(&self->base);
1842  bail_nomem:
1843  return _cairo_scan_converter_create_in_error (status);
1844 }
1845 