w32tex
About: TeX Live provides a comprehensive TeX system, including all the major TeX-related programs, macro packages, and fonts that are free software. This package contains the Windows sources.
  Fossies Dox: w32tex-src.tar.xz (unofficial and experimental doxygen-generated source code documentation)

cairo-tor-scan-converter.c
1 /* -*- Mode: c; tab-width: 8; c-basic-offset: 4; indent-tabs-mode: t; -*- */
2 /* glitter-paths - polygon scan converter
3  *
4  * Copyright (c) 2008 M Joonas Pihlaja
5  * Copyright (c) 2007 David Turner
6  *
7  * Permission is hereby granted, free of charge, to any person
8  * obtaining a copy of this software and associated documentation
9  * files (the "Software"), to deal in the Software without
10  * restriction, including without limitation the rights to use,
11  * copy, modify, merge, publish, distribute, sublicense, and/or sell
12  * copies of the Software, and to permit persons to whom the
13  * Software is furnished to do so, subject to the following
14  * conditions:
15  *
16  * The above copyright notice and this permission notice shall be
17  * included in all copies or substantial portions of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
21  * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
22  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
23  * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
24  * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
25  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26  * OTHER DEALINGS IN THE SOFTWARE.
27  */
28 /* This is the Glitter paths scan converter incorporated into cairo.
29  * The source is from commit 734c53237a867a773640bd5b64816249fa1730f8
30  * of
31  *
32  * https://gitweb.freedesktop.org/?p=users/joonas/glitter-paths
33  */
34 /* Glitter-paths is a stand-alone polygon rasteriser derived from
35  * David Turner's reimplementation of Tor Andersson's 15x17
36  * supersampling rasteriser from the Apparition graphics library. The
37  * main new feature here is cheaply choosing per-scan line between
38  * doing fully analytical coverage computation for an entire row at a
39  * time vs. using a supersampling approach.
40  *
41  * David Turner's code can be found at
42  *
43  * http://david.freetype.org/rasterizer-shootout/raster-comparison-20070813.tar.bz2
44  *
45  * In particular this file incorporates large parts of ftgrays_tor10.h
46  * from raster-comparison-20070813.tar.bz2
47  */
48 /* Overview
49  *
50  * A scan converter's basic purpose is to take polygon edges and convert
51  * them into an RLE compressed A8 mask. This one works in two phases:
52  * gathering edges and generating spans.
53  *
54  * 1) As the user feeds the scan converter edges they are vertically
55  * clipped and bucketed into a _polygon_ data structure. The edges
56  * are also snapped from the user's coordinates to the subpixel grid
57  * coordinates used during scan conversion.
58  *
59  * user
60  * |
61  * | edges
62  * V
63  * polygon buckets
64  *
65  * 2) Generating spans works by performing a vertical sweep of pixel
66  * rows from top to bottom and maintaining an _active_list_ of edges
67  * that intersect the row. From the active list the fill rule
68  * determines which edges are the left and right edges of the start of
69  * each span, and their contribution is then accumulated into a pixel
70  * coverage list (_cell_list_) as coverage deltas. Once the coverage
71  * deltas of all edges are known we can form spans of constant pixel
72  * coverage by summing the deltas during a traversal of the cell list.
73  * At the end of a pixel row the cell list is sent to a coverage
74  * blitter for rendering to some target surface.
75  *
76  * The pixel coverages are computed by either supersampling the row
77  * and box filtering a mono rasterisation, or by computing the exact
78  * coverages of edges in the active list. The supersampling method is
79  * used whenever some edge starts or stops within the row or there are
80  * edge intersections in the row.
81  *
82  *    polygon bucket for       \
83  *    current pixel row        |
84  *       |                     |
85  *       | activate new edges  |  Repeat GRID_Y times if we
86  *       V                     \  are supersampling this row,
87  *    active list              /  or just once if we're computing
88  *       |                     |  analytical coverage.
89  *       | coverage deltas     |
90  *       V                     |
91  *    pixel coverage list      /
92  *       |
93  *       V
94  *    coverage blitter
95  */
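In code terms, the two phases map onto the glitter entry points defined later in this file. The following is a sketch only, not part of the original source: error handling is omitted (real callers setjmp on the jmp buffer first, as _cairo_tor_scan_converter_generate does at the bottom of the file), and the edge array and renderer are assumed to come from cairo's compositor.

/* Sketch: driving the two phases of the scan converter. */
static void
sketch_render (const cairo_edge_t *edges, int num_edges,
               int xmin, int ymin, int xmax, int ymax,
               cairo_span_renderer_t *renderer)
{
    glitter_scan_converter_t converter;
    jmp_buf jmp;
    int i;

    _glitter_scan_converter_init (&converter, &jmp);
    glitter_scan_converter_reset (&converter, xmin, ymin, xmax, ymax);

    /* Phase 1: edges are vertically clipped, snapped to the subpixel
     * grid and bucketed by pixel row. */
    for (i = 0; i < num_edges; i++)
        glitter_scan_converter_add_edge (&converter, &edges[i]);

    /* Phase 2: sweep the rows top to bottom, blitting spans as we go. */
    glitter_scan_converter_render (&converter, ~0 /* non-zero winding */,
                                   1 /* antialias to A8 */, renderer);

    _glitter_scan_converter_fini (&converter);
}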
96 #include "cairoint.h"
97 #include "cairo-spans-private.h"
98 #include "cairo-error-private.h"
99 
100 #include <stdlib.h>
101 #include <string.h>
102 #include <limits.h>
103 #include <setjmp.h>
104 
105 /*-------------------------------------------------------------------------
106  * cairo specific config
107  */
108 #define I static
109 
110 /* Prefer cairo's status type. */
111 #define GLITTER_HAVE_STATUS_T 1
112 #define GLITTER_STATUS_SUCCESS CAIRO_STATUS_SUCCESS
113 #define GLITTER_STATUS_NO_MEMORY CAIRO_STATUS_NO_MEMORY
114 typedef cairo_status_t glitter_status_t;
115 
116 /* The input coordinate scale and the rasterisation grid scales. */
117 #define GLITTER_INPUT_BITS CAIRO_FIXED_FRAC_BITS
118 #define GRID_X_BITS CAIRO_FIXED_FRAC_BITS
119 #define GRID_Y 15
120 
121 /* Set glitter up to use a cairo span renderer to do the coverage
122  * blitting. */
123 struct pool;
124 struct cell_list;
125 
126 /*-------------------------------------------------------------------------
127  * glitter-paths.h
128  */
129 
130 /* "Input scaled" numbers are fixed precision reals with multiplier
131  * 2**GLITTER_INPUT_BITS. Input coordinates are given to glitter as
132  * pixel scaled numbers. These get converted to the internal grid
133  * scaled numbers as soon as possible. Internal overflow is possible
134  * if GRID_X/Y inside glitter-paths.c is larger than
135  * 1<<GLITTER_INPUT_BITS. */
136 #ifndef GLITTER_INPUT_BITS
137 # define GLITTER_INPUT_BITS 8
138 #endif
139 #define GLITTER_INPUT_SCALE (1<<GLITTER_INPUT_BITS)
140 typedef int glitter_input_scaled_t;
141 
142 #if !GLITTER_HAVE_STATUS_T
143 typedef enum {
144  GLITTER_STATUS_SUCCESS = 0,
145  GLITTER_STATUS_NO_MEMORY
146 } glitter_status_t;
147 #endif
148 
149 #ifndef I
150 # define I /*static*/
151 #endif
152 
153 /* Opaque type for scan converting. */
154 typedef struct glitter_scan_converter glitter_scan_converter_t;
155 
156 /* Reset a scan converter to accept polygon edges and set the clip box
157  * in pixels. Allocates O(ymax-ymin) bytes of memory. The clip box
158  * is set to integer pixel coordinates xmin <= x < xmax, ymin <= y <
159  * ymax. */
160 I glitter_status_t
161 glitter_scan_converter_reset(
162  glitter_scan_converter_t *converter,
163  int xmin, int ymin,
164  int xmax, int ymax);
165 
166 /* Render the polygon in the scan converter to the given A8 format
167  * image raster. Only the pixels accessible as pixels[y*stride+x] for
168  * x,y inside the clip box are written to, where xmin <= x < xmax,
169  * ymin <= y < ymax. The image is assumed to be clear on input.
170  *
171  * If nonzero_fill is true then the interior of the polygon is
172  * computed with the non-zero fill rule. Otherwise the even-odd fill
173  * rule is used.
174  *
175  * The scan converter must be reset or destroyed after this call. */
176 
177 /*-------------------------------------------------------------------------
178  * glitter-paths.c: Implementation internal types
179  */
180 #include <stdlib.h>
181 #include <string.h>
182 #include <limits.h>
183 
184 /* All polygon coordinates are snapped onto a subsample grid. "Grid
185  * scaled" numbers are fixed precision reals with multiplier GRID_X or
186  * GRID_Y. */
187 typedef int grid_scaled_t;
188 typedef int grid_scaled_x_t;
189 typedef int grid_scaled_y_t;
190 
191 /* Default x/y scale factors.
192  * You can either define GRID_X/Y_BITS to get a power-of-two scale
193  * or define GRID_X/Y separately. */
194 #if !defined(GRID_X) && !defined(GRID_X_BITS)
195 # define GRID_X_BITS 8
196 #endif
197 #if !defined(GRID_Y) && !defined(GRID_Y_BITS)
198 # define GRID_Y 15
199 #endif
200 
201 /* Use GRID_X/Y_BITS to define GRID_X/Y if they're available. */
202 #ifdef GRID_X_BITS
203 # define GRID_X (1 << GRID_X_BITS)
204 #endif
205 #ifdef GRID_Y_BITS
206 # define GRID_Y (1 << GRID_Y_BITS)
207 #endif
208 
209 /* The GRID_X_TO_INT_FRAC macro splits a grid scaled coordinate into
210  * integer and fractional parts. The integer part is floored. */
211 #if defined(GRID_X_TO_INT_FRAC)
212  /* do nothing */
213 #elif defined(GRID_X_BITS)
214 # define GRID_X_TO_INT_FRAC(x, i, f) \
215  _GRID_TO_INT_FRAC_shift(x, i, f, GRID_X_BITS)
216 #else
217 # define GRID_X_TO_INT_FRAC(x, i, f) \
218  _GRID_TO_INT_FRAC_general(x, i, f, GRID_X)
219 #endif
220 
221 #define _GRID_TO_INT_FRAC_general(t, i, f, m) do { \
222  (i) = (t) / (m); \
223  (f) = (t) % (m); \
224  if ((f) < 0) { \
225  --(i); \
226  (f) += (m); \
227  } \
228 } while (0)
229 
230 #define _GRID_TO_INT_FRAC_shift(t, i, f, b) do { \
231  (f) = (t) & ((1 << (b)) - 1); \
232  (i) = (t) >> (b); \
233 } while (0)
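Both variants perform a floored split, which is what makes negative coordinates land in the pixel to their left. A standalone check, illustrative only and assuming the macro definitions above with GRID_X_BITS = 8 (the shift variant also assumes the usual arithmetic right shift for negative ints):

#include <assert.h>

int main (void)
{
    int i, f;

    /* t = -3 splits as i = -1, f = 253, since -3 = (-1)*256 + 253;
     * a plain truncating division would give i = 0, f = -3 instead. */
    _GRID_TO_INT_FRAC_shift (-3, i, f, 8);
    assert (i == -1 && f == 253);

    _GRID_TO_INT_FRAC_general (-3, i, f, 256);
    assert (i == -1 && f == 253);

    return 0;
}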
234 
235 /* A grid area is a real in [0,1] scaled by 2*GRID_X*GRID_Y. We want
236  * to be able to represent exactly areas of subpixel trapezoids whose
237  * vertices are given in grid scaled coordinates. The scale factor
238  * comes from needing to accurately represent the area 0.5*dx*dy of a
239  * triangle with base dx and height dy in grid scaled numbers. */
240 #define GRID_XY (2*GRID_X*GRID_Y) /* Unit area on the grid. */
241 
242 /* GRID_AREA_TO_ALPHA(area): map [0,GRID_XY] to [0,255]. */
243 #if GRID_XY == 510
244 # define GRID_AREA_TO_ALPHA(c) (((c)+1) >> 1)
245 #elif GRID_XY == 255
246 # define GRID_AREA_TO_ALPHA(c) (c)
247 #elif GRID_XY == 64
248 # define GRID_AREA_TO_ALPHA(c) (((c) << 2) | -(((c) & 0x40) >> 6))
249 #elif GRID_XY == 128
250 # define GRID_AREA_TO_ALPHA(c) ((((c) << 1) | -((c) >> 7)) & 255)
251 #elif GRID_XY == 256
252 # define GRID_AREA_TO_ALPHA(c) (((c) | -((c) >> 8)) & 255)
253 #elif GRID_XY == 15
254 # define GRID_AREA_TO_ALPHA(c) (((c) << 4) + (c))
255 #elif GRID_XY == 2*256*15
256 # define GRID_AREA_TO_ALPHA(c) (((c) + ((c)<<4) + 256) >> 9)
257 #else
258 # define GRID_AREA_TO_ALPHA(c) (((c)*255 + GRID_XY/2) / GRID_XY)
259 #endif
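With this file's defaults (GRID_X = 256, GRID_Y = 15) the 2*256*15 branch is selected, and its arithmetic can be spot-checked by hand; the figures below are an illustration, not part of the original source:

/* GRID_XY = 2*256*15 = 7680.
 * Full coverage, c = 7680:  (7680 + (7680<<4) + 256) >> 9
 *                         = 130816 >> 9 = 255  (exact value 255.5, floored).
 * Half coverage, c = 3840:  (3840 + (3840<<4) + 256) >> 9
 *                         = 65536 >> 9 = 128. */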
260 
261 #define UNROLL3(x) x x x
262 
263 struct quorem {
264  int32_t quo;
265  int64_t rem;
266 };
267 
268 /* Header for a chunk of memory in a memory pool. */
269 struct _pool_chunk {
270  /* # bytes used in this chunk. */
271  size_t size;
272 
273  /* # bytes total in this chunk */
274  size_t capacity;
275 
276  /* Pointer to the previous chunk or %NULL if this is the sentinel
277  * chunk in the pool header. */
278  struct _pool_chunk *prev_chunk;
279 
280  /* Actual data starts here. Well aligned even for 64 bit types. */
281  int64_t data;
282 };
283 
284 /* The int64_t data member of _pool_chunk just exists to enforce alignment,
285  * it shouldn't be included in the allocated size for the struct. */
286 #define SIZEOF_POOL_CHUNK (sizeof(struct _pool_chunk) - sizeof(int64_t))
287 
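The payoff is that a chunk header and its payload come from a single allocation; the layout sketch below is illustrative, and assumes (as the code itself does) that the compiler inserts no padding before the data member:

/* _cairo_malloc (SIZEOF_POOL_CHUNK + size) yields:
 *
 *   [ size | capacity | prev_chunk ][ payload of `size' bytes ... ]
 *                                   ^ the int64_t data member starts here
 *
 * Excluding `data' from SIZEOF_POOL_CHUNK means its 8 bytes are not
 * wasted: they become the first 8 bytes of the payload, which also
 * inherits the member's 8-byte alignment. */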
288 /* A memory pool. This is supposed to be embedded on the stack or
289  * within some other structure. It may optionally be followed by an
290  * embedded array from which requests are fulfilled until
291  * malloc needs to be called to allocate a first real chunk. */
292 struct pool {
293  /* Chunk we're allocating from. */
294  struct _pool_chunk *current;
295 
296  jmp_buf *jmp;
297 
298  /* Free list of previously allocated chunks. All have >= default
299  * capacity. */
300  struct _pool_chunk *first_free;
301 
302  /* The default capacity of a chunk. */
303  size_t default_capacity;
304 
305  /* Header for the sentinel chunk. Directly following the pool
306  * struct should be some space for embedded elements from which
307  * the sentinel chunk allocates from. This is expressed as a char
308  * array so that the 'int64_t data' member of _pool_chunk isn't
309  * included. This way embedding struct pool in other structs works
310  * without wasting space. */
311  char sentinel[SIZEOF_POOL_CHUNK];
312 };
313 
314 /* A polygon edge. */
315 struct edge {
316  /* Next in y-bucket or active list. */
317  struct edge *next, *prev;
318 
319  /* The clipped y of the top of the edge. */
320  grid_scaled_y_t ytop;
321 
322  /* Number of subsample rows remaining to scan convert of this
323  * edge. */
324  grid_scaled_y_t height_left;
325 
326  /* Original sign of the edge: +1 for downwards, -1 for upwards
327  * edges. */
328  int dir;
329  int cell;
330 
331  /* Current x coordinate while the edge is on the active
332  * list. Initialised to the x coordinate of the top of the
333  * edge. The quotient is in grid_scaled_x_t units and the
334  * remainder is mod dy in grid_scaled_y_t units.*/
335  struct quorem x;
336 
337  /* Advance of the current x when moving down a subsample line. */
338  struct quorem dxdy;
339 
340  /* Advance of the current x when moving down a full pixel
341  * row. Only initialised when the height of the edge is large
342  * enough that there's a chance the edge could be stepped by a
343  * full row's worth of subsample rows at a time. */
344  struct quorem dxdy_full;
345 
346  /* y2-y1 after orienting the edge downwards. */
347  int64_t dy;
348 };
349 
350 #define EDGE_Y_BUCKET_INDEX(y, ymin) (((y) - (ymin))/GRID_Y)
351 
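With the default GRID_Y of 15 this works out to one bucket per output pixel row; an illustrative note, not from the original source:

/* An edge whose clipped top lies 0..14 subsample rows below ymin goes
 * into bucket 0, rows 15..29 into bucket 1, and so on. */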
352 /* A collection of sorted and vertically clipped edges of the polygon.
353  * Edges are moved from the polygon to an active list while scan
354  * converting. */
355 struct polygon {
356  /* The vertical clip extents. */
357  grid_scaled_y_t ymin, ymax;
358 
359  /* Array of edges all starting in the same bucket. An edge is put
360  * into bucket EDGE_BUCKET_INDEX(edge->ytop, polygon->ymin) when
361  * it is added to the polygon. */
362  struct edge **y_buckets;
363  struct edge *y_buckets_embedded[64];
364 
365  struct {
366  struct pool base[1];
367  struct edge embedded[32];
368  } edge_pool;
369 };
370 
371 /* A cell records the effect on pixel coverage of polygon edges
372  * passing through a pixel. It contains two accumulators of pixel
373  * coverage.
374  *
375  * Consider the effects of a polygon edge on the coverage of a pixel
376  * it intersects and that of the following one. The coverage of the
377  * following pixel is the height of the edge multiplied by the width
378  * of the pixel, and the coverage of the pixel itself is the area of
379  * the trapezoid formed by the edge and the right side of the pixel.
380  *
381  * +-----------------------+-----------------------+
382  * |                       |                       |
383  * |                       |                       |
384  * |_______________________|_______________________|
385  * |   \...................|.......................|\
386  * |    \..................|.......................| |
387  * |     \.................|.......................| |
388  * |      \....covered.....|.......................| |
389  * |       \....area.......|.......................| } covered height
390  * |        \..............|.......................| |
391  * |uncovered\.............|.......................| |
392  * |  area    \............|.......................| |
393  * |___________\...........|.......................|/
394  * |                       |                       |
395  * |                       |                       |
396  * |                       |                       |
397  * +-----------------------+-----------------------+
398  *
399  * Since the coverage of the following pixel will always be a multiple
400  * of the width of the pixel, we can store the height of the covered
401  * area instead. The coverage of the pixel itself is the total
402  * coverage minus the area of the uncovered area to the left of the
403  * edge. As it's faster to compute the uncovered area we only store
404  * that and subtract it from the total coverage later when forming
405  * spans to blit.
406  *
407  * The heights and areas are signed, with left edges of the polygon
408  * having positive sign and right edges having negative sign. When
409  * two edges intersect they swap their left/rightness so their
410  * contribution above and below the intersection point must be
411  * computed separately. */
412 struct cell {
413  struct cell *next;
414  int x;
415  int16_t uncovered_area;
416  int16_t covered_height;
417 };
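Working through the picture with the grid defaults (GRID_X = 256, GRID_Y = 15, so a fully covered pixel is GRID_XY = 7680) shows how the two accumulators combine; the numbers are an illustration, not from the file:

/* An edge crossing the pixel's full height, entering at the top-left
 * corner and exiting the bottom at x = GRID_X/2 = 128:
 *
 *   covered_height = GRID_Y = 15                 (whole row crossed)
 *   uncovered_area = 2 * (1/2 * 128 * 15) = 1920 (left-of-edge triangle,
 *                                                 doubled per GRID_XY)
 *
 * At blit time:  area = covered_height*2*GRID_X - uncovered_area
 *                     = 7680 - 1920 = 5760 = 0.75 * GRID_XY,
 * i.e. exactly the 75% of the pixel lying to the right of the edge. */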
418 
419 /* A cell list represents the scan line sparsely as cells ordered by
420  * ascending x. It is geared towards scanning the cells in order
421  * using an internal cursor. */
422 struct cell_list {
423  /* Sentinel nodes */
424  struct cell head, tail;
425 
426  /* Cursor state for iterating through the cell list. */
427  struct cell *cursor, *rewind;
428 
429  /* Cells in the cell list are owned by the cell list and are
430  * allocated from this pool. */
431  struct {
432  struct pool base[1];
433  struct cell embedded[32];
434  } cell_pool;
435 };
436 
437 struct cell_pair {
438  struct cell *cell1;
439  struct cell *cell2;
440 };
441 
442 /* The active list contains edges in the current scan line ordered by
443  * the x-coordinate of the intercept of the edge and the scan line. */
444 struct active_list {
445  /* Leftmost edge on the current scan line. */
446  struct edge head, tail;
447 
448  /* A lower bound on the height of the active edges is used to
449  * estimate how soon some active edge ends. We can't advance the
450  * scan conversion by a full pixel row if an edge ends somewhere
451  * within it. */
452  grid_scaled_y_t min_height;
453  int is_vertical;
454 };
455 
456 struct glitter_scan_converter {
457  struct polygon polygon[1];
458  struct active_list active[1];
459  struct cell_list coverages[1];
460 
461  cairo_half_open_span_t *spans;
462  cairo_half_open_span_t spans_embedded[64];
463 
464  /* Clip box. */
465  grid_scaled_x_t xmin, xmax;
466  grid_scaled_y_t ymin, ymax;
467 };
468 
469 static struct _pool_chunk *
470 _pool_chunk_init(
471  struct _pool_chunk *p,
472  struct _pool_chunk *prev_chunk,
473  size_t capacity)
474 {
475  p->prev_chunk = prev_chunk;
476  p->size = 0;
477  p->capacity = capacity;
478  return p;
479 }
480 
481 static struct _pool_chunk *
482 _pool_chunk_create(struct pool *pool, size_t size)
483 {
484  struct _pool_chunk *p;
485 
486  p = _cairo_malloc (SIZEOF_POOL_CHUNK + size);
487  if (unlikely (NULL == p))
488  longjmp (*pool->jmp, _cairo_error (CAIRO_STATUS_NO_MEMORY));
489 
490  return _pool_chunk_init(p, pool->current, size);
491 }
492 
493 static void
494 pool_init(struct pool *pool,
495  jmp_buf *jmp,
496  size_t default_capacity,
497  size_t embedded_capacity)
498 {
499  pool->jmp = jmp;
500  pool->current = (void*) pool->sentinel;
501  pool->first_free = NULL;
502  pool->default_capacity = default_capacity;
503  _pool_chunk_init(pool->current, NULL, embedded_capacity);
504 }
505 
506 static void
507 pool_fini (struct pool *pool)
508 {
509  struct _pool_chunk *p = pool->current;
510  do {
511  while (NULL != p) {
512  struct _pool_chunk *prev = p->prev_chunk;
513  if (p != (void *) pool->sentinel)
514  free(p);
515  p = prev;
516  }
517  p = pool->first_free;
518  pool->first_free = NULL;
519  } while (NULL != p);
520 }
521 
522 /* Satisfy an allocation by first allocating a new large enough chunk
523  * and adding it to the head of the pool's chunk list. This function
524  * is called as a fallback if pool_alloc() couldn't do a quick
525  * allocation from the current chunk in the pool. */
526 static void *
527 _pool_alloc_from_new_chunk (
528  struct pool *pool,
529  size_t size)
530 {
531  struct _pool_chunk *chunk;
532  void *obj;
533  size_t capacity;
534 
535  /* If the allocation is smaller than the default chunk size then
536  * try getting a chunk off the free list. Force alloc of a new
537  * chunk for large requests. */
538  capacity = size;
539  chunk = NULL;
540  if (size < pool->default_capacity) {
541  capacity = pool->default_capacity;
542  chunk = pool->first_free;
543  if (chunk) {
544  pool->first_free = chunk->prev_chunk;
545  _pool_chunk_init(chunk, pool->current, chunk->capacity);
546  }
547  }
548 
549  if (NULL == chunk)
550  chunk = _pool_chunk_create (pool, capacity);
551  pool->current = chunk;
552 
553  obj = ((unsigned char*)&chunk->data + chunk->size);
554  chunk->size += size;
555  return obj;
556 }
557 
558 /* Allocate size bytes from the pool. The first allocated address
559  * returned from a pool is aligned to 8 bytes. Subsequent
560  * addresses will maintain alignment as long as multiples of 8 are
561  * allocated. Returns the address of a new memory area or %NULL on
562  * allocation failures. The pool retains ownership of the returned
563  * memory. */
564 inline static void *
565 pool_alloc (struct pool *pool, size_t size)
566 {
567  struct _pool_chunk *chunk = pool->current;
568 
569  if (size <= chunk->capacity - chunk->size) {
570  void *obj = ((unsigned char*)&chunk->data + chunk->size);
571  chunk->size += size;
572  return obj;
573  } else {
574  return _pool_alloc_from_new_chunk(pool, size);
575  }
576 }
577 
578 /* Relinquish all pool_alloced memory back to the pool. */
579 static void
580 pool_reset (struct pool *pool)
581 {
582  /* Transfer all used chunks to the chunk free list. */
583  struct _pool_chunk *chunk = pool->current;
584  if (chunk != (void *) pool->sentinel) {
585  while (chunk->prev_chunk != (void *) pool->sentinel) {
586  chunk = chunk->prev_chunk;
587  }
588  chunk->prev_chunk = pool->first_free;
589  pool->first_free = pool->current;
590  }
591  /* Reset the sentinel as the current chunk. */
592  pool->current = (void *) pool->sentinel;
593  pool->current->size = 0;
594 }
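Taken together these functions form a small arena allocator. A hedged usage sketch follows; struct my_pool is hypothetical, but its shape (a pool immediately followed by embedded storage) mirrors how struct polygon and struct cell_list embed their pools in this file:

/* Sketch only: an embedded pool with inline storage. */
struct my_pool {
    struct pool base[1];
    char embedded[1024];
};

static void
example (void)
{
    jmp_buf jmp;
    struct my_pool mp;

    if (setjmp (jmp) == 0) {        /* pool_alloc longjmps here on OOM */
        pool_init (mp.base, &jmp, 8192, sizeof (mp.embedded));

        struct cell *c = pool_alloc (mp.base, sizeof (struct cell));
        (void) c;   /* ... use c; the pool retains ownership ... */

        pool_reset (mp.base);       /* recycle every allocation at once */
        pool_fini (mp.base);        /* hand the chunks back to malloc */
    }
}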
595 
596 /* Rewinds the cell list's cursor to the beginning. After rewinding
597  * we're good to cell_list_find() the cell at any x coordinate. */
598 inline static void
599 cell_list_rewind (struct cell_list *cells)
600 {
601  cells->cursor = &cells->head;
602 }
603 
604 inline static void
605 cell_list_maybe_rewind (struct cell_list *cells, int x)
606 {
607  if (x < cells->cursor->x) {
608  cells->cursor = cells->rewind;
609  if (x < cells->cursor->x)
610  cells->cursor = &cells->head;
611  }
612 }
613 
614 inline static void
615 cell_list_set_rewind (struct cell_list *cells)
616 {
617  cells->rewind = cells->cursor;
618 }
619 
620 static void
621 cell_list_init(struct cell_list *cells, jmp_buf *jmp)
622 {
623  pool_init(cells->cell_pool.base, jmp,
624  256*sizeof(struct cell),
625  sizeof(cells->cell_pool.embedded));
626  cells->tail.next = NULL;
627  cells->tail.x = INT_MAX;
628  cells->head.x = INT_MIN;
629  cells->head.next = &cells->tail;
630  cell_list_rewind (cells);
631 }
632 
633 static void
634 cell_list_fini(struct cell_list *cells)
635 {
636  pool_fini (cells->cell_pool.base);
637 }
638 
639 /* Empty the cell list. This is called at the start of every pixel
640  * row. */
641 inline static void
642 cell_list_reset (struct cell_list *cells)
643 {
644  cell_list_rewind (cells);
645  cells->head.next = &cells->tail;
646  pool_reset (cells->cell_pool.base);
647 }
648 
649 inline static struct cell *
650 cell_list_alloc (struct cell_list *cells,
651  struct cell *tail,
652  int x)
653 {
654  struct cell *cell;
655 
656  cell = pool_alloc (cells->cell_pool.base, sizeof (struct cell));
657  cell->next = tail->next;
658  tail->next = cell;
659  cell->x = x;
660  *(uint32_t *)&cell->uncovered_area = 0;
661 
662  return cell;
663 }
664 
665 /* Find a cell at the given x-coordinate. Returns %NULL if a new cell
666  * needed to be allocated but couldn't be. Cells must be found with
667  * non-decreasing x-coordinate until the cell list is rewound using
668  * cell_list_rewind(). Ownership of the returned cell is retained by
669  * the cell list. */
670 inline static struct cell *
671 cell_list_find (struct cell_list *cells, int x)
672 {
673  struct cell *tail = cells->cursor;
674 
675  if (tail->x == x)
676  return tail;
677 
678  while (1) {
679  UNROLL3({
680  if (tail->next->x > x)
681  break;
682  tail = tail->next;
683  });
684  }
685 
686  if (tail->x != x)
687  tail = cell_list_alloc (cells, tail, x);
688  return cells->cursor = tail;
689 
690 }
691 
692 /* Find two cells at x1 and x2. This is exactly equivalent
693  * to
694  *
695  * pair.cell1 = cell_list_find(cells, x1);
696  * pair.cell2 = cell_list_find(cells, x2);
697  *
698  * except with less function call overhead. */
699 inline static struct cell_pair
700 cell_list_find_pair(struct cell_list *cells, int x1, int x2)
701 {
702  struct cell_pair pair;
703 
704  pair.cell1 = cells->cursor;
705  while (1) {
706  UNROLL3({
707  if (pair.cell1->next->x > x1)
708  break;
709  pair.cell1 = pair.cell1->next;
710  });
711  }
712  if (pair.cell1->x != x1)
713  pair.cell1 = cell_list_alloc (cells, pair.cell1, x1);
714 
715  pair.cell2 = pair.cell1;
716  while (1) {
717  UNROLL3({
718  if (pair.cell2->next->x > x2)
719  break;
720  pair.cell2 = pair.cell2->next;
721  });
722  }
723  if (pair.cell2->x != x2)
724  pair.cell2 = cell_list_alloc (cells, pair.cell2, x2);
725 
726  cells->cursor = pair.cell2;
727  return pair;
728 }
729 
730 /* Add a subpixel span covering [x1, x2) to the coverage cells. */
731 inline static void
732 cell_list_add_subspan(struct cell_list *cells,
733  grid_scaled_x_t x1,
734  grid_scaled_x_t x2)
735 {
736  int ix1, fx1;
737  int ix2, fx2;
738 
739  if (x1 == x2)
740  return;
741 
742  GRID_X_TO_INT_FRAC(x1, ix1, fx1);
743  GRID_X_TO_INT_FRAC(x2, ix2, fx2);
744 
745  if (ix1 != ix2) {
746  struct cell_pair p;
747  p = cell_list_find_pair(cells, ix1, ix2);
748  p.cell1->uncovered_area += 2*fx1;
749  ++p.cell1->covered_height;
750  p.cell2->uncovered_area -= 2*fx2;
751  --p.cell2->covered_height;
752  } else {
753  struct cell *cell = cell_list_find(cells, ix1);
754  cell->uncovered_area += 2*(fx1-fx2);
755  }
756 }
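These deltas line up with the blitters further down, which accumulate cover += covered_height*GRID_X*2 and then take area = cover - uncovered_area. A worked illustration with GRID_X = 256, not from the original source:

/* A subrow span covering the right half of pixel ix and ending exactly
 * on the pixel boundary: fx1 = 128, and x2 snaps to ix2 = ix+1, fx2 = 0.
 * The two-cell branch records
 *   cell at ix:    uncovered_area += 2*128 = 256, covered_height += 1
 *   cell at ix+1:  uncovered_area -= 0,           covered_height -= 1
 * so pixel ix blits as 1*2*GRID_X - 256 = 512 - 256 = 256, half of one
 * subrow's full contribution of 2*GRID_X = 512, while pixel ix+1 nets
 * out to zero, as it should. */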
757 
758 inline static void full_step (struct edge *e)
759 {
760  if (e->dy == 0)
761  return;
762 
763  e->x.quo += e->dxdy_full.quo;
764  e->x.rem += e->dxdy_full.rem;
765  if (e->x.rem < 0) {
766  e->x.quo--;
767  e->x.rem += e->dy;
768  } else if (e->x.rem >= e->dy) {
769  ++e->x.quo;
770  e->x.rem -= e->dy;
771  }
772 
773  e->cell = e->x.quo + (e->x.rem >= e->dy/2);
774 }
775 
776 
777 /* Adds the analytical coverage of an edge crossing the current pixel
778  * row to the coverage cells and advances the edge's x position to the
779  * following row.
780  *
781  * This function is only called when we know that during this pixel row:
782  *
783  * 1) The relative order of all edges on the active list doesn't
784  * change. In particular, no edges intersect within this row to pixel
785  * precision.
786  *
787  * 2) No new edges start in this row.
788  *
789  * 3) No existing edges end mid-row.
790  *
791  * This function depends on being called with all edges from the
792  * active list in the order they appear on the list (i.e. with
793  * non-decreasing x-coordinate.) */
794 static void
795 cell_list_render_edge(struct cell_list *cells,
796  struct edge *edge,
797  int sign)
798 {
799  struct quorem x1, x2;
800  grid_scaled_x_t fx1, fx2;
801  int ix1, ix2;
802 
803  x1 = edge->x;
804  full_step (edge);
805  x2 = edge->x;
806 
807  /* Step back from the sample location (half-subrow) to the pixel origin */
808  if (edge->dy) {
809  x1.quo -= edge->dxdy.quo / 2;
810  x1.rem -= edge->dxdy.rem / 2;
811  if (x1.rem < 0) {
812  --x1.quo;
813  x1.rem += edge->dy;
814  } else if (x1.rem >= edge->dy) {
815  ++x1.quo;
816  x1.rem -= edge->dy;
817  }
818 
819  x2.quo -= edge->dxdy.quo / 2;
820  x2.rem -= edge->dxdy.rem / 2;
821  if (x2.rem < 0) {
822  --x2.quo;
823  x2.rem += edge->dy;
824  } else if (x2.rem >= edge->dy) {
825  ++x2.quo;
826  x2.rem -= edge->dy;
827  }
828  }
829 
830  GRID_X_TO_INT_FRAC(x1.quo, ix1, fx1);
831  GRID_X_TO_INT_FRAC(x2.quo, ix2, fx2);
832 
833  cell_list_maybe_rewind(cells, MIN(ix1, ix2));
834 
835  /* Edge is entirely within a column? */
836  if (ix1 == ix2) {
837  /* We always know that ix1 is >= the cell list cursor in this
838  * case due to the no-intersections precondition. */
839  struct cell *cell = cell_list_find(cells, ix1);
840  cell->covered_height += sign*GRID_Y;
841  cell->uncovered_area += sign*(fx1 + fx2)*GRID_Y;
842  return;
843  }
844 
845  /* Orient the edge left-to-right. */
846  if (ix2 < ix1) {
847  struct quorem tx;
848  int t;
849 
850  t = ix1;
851  ix1 = ix2;
852  ix2 = t;
853 
854  t = fx1;
855  fx1 = fx2;
856  fx2 = t;
857 
858  tx = x1;
859  x1 = x2;
860  x2 = tx;
861  }
862 
863  /* Add coverage for all pixels [ix1,ix2] on this row crossed
864  * by the edge. */
865  {
866  struct cell_pair pair;
867  struct quorem y;
868  int64_t tmp, dx;
869  int y_last;
870 
871  dx = (x2.quo - x1.quo) * edge->dy + (x2.rem - x1.rem);
872 
873  tmp = (ix1 + 1) * GRID_X * edge->dy;
874  tmp -= x1.quo * edge->dy + x1.rem;
875  tmp *= GRID_Y;
876 
877  y.quo = tmp / dx;
878  y.rem = tmp % dx;
879 
880  /* When rendering a previous edge on the active list we may
881  * advance the cell list cursor past the leftmost pixel of the
882  * current edge even though the two edges don't intersect.
883  * e.g. consider two edges going down and rightwards:
884  *
885  *  --\_+---\_+-----+-----+----
886  *      \_    \_    |     |
887  *      | \_   | \_  |    |
888  *      |   \_|   \_|     |
889  *      |     \_     \_   |
890  *  ----+-----+-\---+-\---+----
891  *
892  * The left edge touches cells past the starting cell of the
893  * right edge. Fortunately such cases are rare.
894  */
895 
896  pair = cell_list_find_pair(cells, ix1, ix1+1);
897  pair.cell1->uncovered_area += sign*y.quo*(GRID_X + fx1);
898  pair.cell1->covered_height += sign*y.quo;
899  y_last = y.quo;
900 
901  if (ix1+1 < ix2) {
902  struct cell *cell = pair.cell2;
903  struct quorem dydx_full;
904 
905  dydx_full.quo = GRID_Y * GRID_X * edge->dy / dx;
906  dydx_full.rem = GRID_Y * GRID_X * edge->dy % dx;
907 
908  ++ix1;
909  do {
910  y.quo += dydx_full.quo;
911  y.rem += dydx_full.rem;
912  if (y.rem >= dx) {
913  y.quo++;
914  y.rem -= dx;
915  }
916 
917  cell->uncovered_area += sign*(y.quo - y_last)*GRID_X;
918  cell->covered_height += sign*(y.quo - y_last);
919  y_last = y.quo;
920 
921  ++ix1;
922  cell = cell_list_find(cells, ix1);
923  } while (ix1 != ix2);
924 
925  pair.cell2 = cell;
926  }
927  pair.cell2->uncovered_area += sign*(GRID_Y - y_last)*fx2;
928  pair.cell2->covered_height += sign*(GRID_Y - y_last);
929  }
930 }
931 
932 static void
933 polygon_init (struct polygon *polygon, jmp_buf *jmp)
934 {
935  polygon->ymin = polygon->ymax = 0;
936  polygon->y_buckets = polygon->y_buckets_embedded;
937  pool_init (polygon->edge_pool.base, jmp,
938  8192 - sizeof (struct _pool_chunk),
939  sizeof (polygon->edge_pool.embedded));
940 }
941 
942 static void
943 polygon_fini (struct polygon *polygon)
944 {
945  if (polygon->y_buckets != polygon->y_buckets_embedded)
946  free (polygon->y_buckets);
947 
948  pool_fini (polygon->edge_pool.base);
949 }
950 
951 /* Empties the polygon of all edges. The polygon is then prepared to
952  * receive new edges and clip them to the vertical range
953  * [ymin,ymax). */
954 static glitter_status_t
955 polygon_reset (struct polygon *polygon,
956  grid_scaled_y_t ymin,
957  grid_scaled_y_t ymax)
958 {
959  unsigned h = ymax - ymin;
960  unsigned num_buckets = EDGE_Y_BUCKET_INDEX(ymax + GRID_Y-1, ymin);
961 
962  pool_reset(polygon->edge_pool.base);
963 
964  if (unlikely (h > 0x7FFFFFFFU - GRID_Y))
965  goto bail_no_mem; /* even if you could, you wouldn't want to. */
966 
967  if (polygon->y_buckets != polygon->y_buckets_embedded)
968  free (polygon->y_buckets);
969 
970  polygon->y_buckets = polygon->y_buckets_embedded;
971  if (num_buckets > ARRAY_LENGTH (polygon->y_buckets_embedded)) {
972  polygon->y_buckets = _cairo_malloc_ab (num_buckets,
973  sizeof (struct edge *));
974  if (unlikely (NULL == polygon->y_buckets))
975  goto bail_no_mem;
976  }
977  memset (polygon->y_buckets, 0, num_buckets * sizeof (struct edge *));
978 
979  polygon->ymin = ymin;
980  polygon->ymax = ymax;
981  return GLITTER_STATUS_SUCCESS;
982 
983 bail_no_mem:
984  polygon->ymin = 0;
985  polygon->ymax = 0;
986  return GLITTER_STATUS_NO_MEMORY;
987 }
988 
989 static void
990 _polygon_insert_edge_into_its_y_bucket(struct polygon *polygon,
991  struct edge *e)
992 {
993  unsigned ix = EDGE_Y_BUCKET_INDEX(e->ytop, polygon->ymin);
994  struct edge **ptail = &polygon->y_buckets[ix];
995  e->next = *ptail;
996  *ptail = e;
997 }
998 
999 static void
1000 active_list_reset (struct active_list *active)
1001 {
1002  active->head.height_left = INT_MAX;
1003  active->head.dy = 0;
1004  active->head.cell = INT_MIN;
1005  active->head.prev = NULL;
1006  active->head.next = &active->tail;
1007  active->tail.prev = &active->head;
1008  active->tail.next = NULL;
1009  active->tail.cell = INT_MAX;
1010  active->tail.height_left = INT_MAX;
1011  active->tail.dy = 0;
1012  active->min_height = 0;
1013  active->is_vertical = 1;
1014 }
1015 
1016 static void
1017 active_list_init(struct active_list *active)
1018 {
1019  active_list_reset(active);
1020 }
1021 
1022 /*
1023  * Merge two sorted edge lists.
1024  * Input:
1025  * - head_a: The head of the first list.
1026  * - head_b: The head of the second list; head_b cannot be NULL.
1027  * Output:
1028  * Returns the head of the merged list.
1029  *
1030  * Implementation notes:
1031  * To make it fast (in particular, to reduce to an insertion sort whenever
1032  * one of the two input lists only has a single element) we iterate through
1033  * a list until its head becomes greater than the head of the other list,
1034  * then we switch their roles. As soon as one of the two lists is empty, we
1035  * just attach the other one to the current list and exit.
1036  * Writes to memory are only needed to "switch" lists (as it also requires
1037  * attaching to the output list the list which we will be iterating next) and
1038  * to attach the last non-empty list.
1039  */
1040 static struct edge *
1041 merge_sorted_edges (struct edge *head_a, struct edge *head_b)
1042 {
1043  struct edge *head, **next, *prev;
1044  int32_t x;
1045 
1046  prev = head_a->prev;
1047  next = &head;
1048  if (head_a->cell <= head_b->cell) {
1049  head = head_a;
1050  } else {
1051  head = head_b;
1052  head_b->prev = prev;
1053  goto start_with_b;
1054  }
1055 
1056  do {
1057  x = head_b->cell;
1058  while (head_a != NULL && head_a->cell <= x) {
1059  prev = head_a;
1060  next = &head_a->next;
1061  head_a = head_a->next;
1062  }
1063 
1064  head_b->prev = prev;
1065  *next = head_b;
1066  if (head_a == NULL)
1067  return head;
1068 
1069 start_with_b:
1070  x = head_a->cell;
1071  while (head_b != NULL && head_b->cell <= x) {
1072  prev = head_b;
1073  next = &head_b->next;
1074  head_b = head_b->next;
1075  }
1076 
1077  head_a->prev = prev;
1078  *next = head_a;
1079  if (head_b == NULL)
1080  return head;
1081  } while (1);
1082 }
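The same role-switching idea is easier to see in isolation on a plain singly-linked list of ints. The sketch below is illustrative only: it assumes, like merge_sorted_edges, that both inputs are non-empty, and it omits the prev pointers that the real merge maintains for the doubly-linked active list. Memory is written only when the iteration switches lists.

struct node { int key; struct node *next; };

static struct node *
merge_keys (struct node *a, struct node *b)
{
    struct node *head, **tail = &head;

    for (;;) {
        if (a->key <= b->key) {
            *tail = a;                       /* splice at the switch */
            while (a->next && a->next->key <= b->key)
                a = a->next;                 /* scan without writing */
            tail = &a->next;
            a = a->next;
            if (a == NULL) { *tail = b; return head; }
        } else {
            *tail = b;
            while (b->next && b->next->key <= a->key)
                b = b->next;
            tail = &b->next;
            b = b->next;
            if (b == NULL) { *tail = a; return head; }
        }
    }
}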
1083 
1084 /*
1085  * Sort (part of) a list.
1086  * Input:
1087  * - list: The list to be sorted; list cannot be NULL.
1088  * - level: Recursion limit.
1089  * Output:
1090  * - head_out: The head of the sorted list containing the first 2^(level+1) elements of the
1091  * input list; if the input list has fewer elements, head_out will be a sorted list
1092  * containing all the elements of the input list.
1093  * Returns the head of the list of unprocessed elements (NULL if the sorted list contains
1094  * all the elements of the input list).
1095  *
1096  * Implementation notes:
1097  * Special case single element list, unroll/inline the sorting of the first two elements.
1098  * Some tail recursion is used since we iterate on the bottom-up solution of the problem
1099  * (we start with a small sorted list and keep merging other lists of the same size to it).
1100  */
1101 static struct edge *
1102 sort_edges (struct edge *list,
1103  unsigned int level,
1104  struct edge **head_out)
1105 {
1106  struct edge *head_other, *remaining;
1107  unsigned int i;
1108 
1109  head_other = list->next;
1110 
1111  if (head_other == NULL) {
1112  *head_out = list;
1113  return NULL;
1114  }
1115 
1116  remaining = head_other->next;
1117  if (list->cell <= head_other->cell) {
1118  *head_out = list;
1119  head_other->next = NULL;
1120  } else {
1121  *head_out = head_other;
1122  head_other->prev = list->prev;
1123  head_other->next = list;
1124  list->prev = head_other;
1125  list->next = NULL;
1126  }
1127 
1128  for (i = 0; i < level && remaining; i++) {
1129  remaining = sort_edges (remaining, i, &head_other);
1130  *head_out = merge_sorted_edges (*head_out, head_other);
1131  }
1132 
1133  return remaining;
1134 }
1135 
1136  static struct edge *
1137 merge_unsorted_edges (struct edge *head, struct edge *unsorted)
1138 {
1139  sort_edges (unsorted, UINT_MAX, &unsorted);
1140  return merge_sorted_edges (head, unsorted);
1141 }
1142 
1143 /* Test if the edges on the active list can be safely advanced by a
1144  * full row without intersections or any edges ending. */
1145 inline static int
1146 can_do_full_row (struct active_list *active)
1147 {
1148  const struct edge *e;
1149  int prev_x = INT_MIN;
1150 
1151  /* Recomputes the minimum height of all edges on the active
1152  * list if we have been dropping edges. */
1153  if (active->min_height <= 0) {
1154  int min_height = INT_MAX;
1155  int is_vertical = 1;
1156 
1157  e = active->head.next;
1158  while (NULL != e) {
1159  if (e->height_left < min_height)
1160  min_height = e->height_left;
1161  is_vertical &= e->dy == 0;
1162  e = e->next;
1163  }
1164 
1165  active->is_vertical = is_vertical;
1166  active->min_height = min_height;
1167  }
1168 
1169  if (active->min_height < GRID_Y)
1170  return 0;
1171 
1172  /* Check for intersections as no edges end during the next row. */
1173  for (e = active->head.next; e != &active->tail; e = e->next) {
1174  int cell;
1175 
1176  if (e->dy) {
1177  struct quorem x = e->x;
1178  x.quo += e->dxdy_full.quo;
1179  x.rem += e->dxdy_full.rem;
1180  if (x.rem < 0) {
1181  x.quo--;
1182  x.rem += e->dy;
1183  } else if (x.rem >= e->dy) {
1184  x.quo++;
1185  x.rem -= e->dy;
1186  }
1187  cell = x.quo + (x.rem >= e->dy/2);
1188  } else
1189  cell = e->cell;
1190 
1191  if (cell < prev_x)
1192  return 0;
1193 
1194  prev_x = cell;
1195  }
1196 
1197  return 1;
1198 }
1199 
1200 /* Merges edges on the given subpixel row from the polygon to the
1201  * active_list. */
1202 inline static void
1203 active_list_merge_edges_from_bucket(struct active_list *active,
1204  struct edge *edges)
1205 {
1206  active->head.next = merge_unsorted_edges (active->head.next, edges);
1207 }
1208 
1209 inline static int
1210 polygon_fill_buckets (struct active_list *active,
1211  struct edge *edge,
1212  int y,
1213  struct edge **buckets)
1214 {
1215  grid_scaled_y_t min_height = active->min_height;
1216  int is_vertical = active->is_vertical;
1217  int max_suby = 0;
1218 
1219  while (edge) {
1220  struct edge *next = edge->next;
1221  int suby = edge->ytop - y;
1222  if (buckets[suby])
1223  buckets[suby]->prev = edge;
1224  edge->next = buckets[suby];
1225  edge->prev = NULL;
1226  buckets[suby] = edge;
1227  if (edge->height_left < min_height)
1228  min_height = edge->height_left;
1229  is_vertical &= edge->dy == 0;
1230  edge = next;
1231  if (suby > max_suby)
1232  max_suby = suby;
1233  }
1234 
1235  active->is_vertical = is_vertical;
1236  active->min_height = min_height;
1237 
1238  return max_suby;
1239 }
1240 
1241 static void step (struct edge *edge)
1242 {
1243  if (edge->dy == 0)
1244  return;
1245 
1246  edge->x.quo += edge->dxdy.quo;
1247  edge->x.rem += edge->dxdy.rem;
1248  if (edge->x.rem < 0) {
1249  --edge->x.quo;
1250  edge->x.rem += edge->dy;
1251  } else if (edge->x.rem >= edge->dy) {
1252  ++edge->x.quo;
1253  edge->x.rem -= edge->dy;
1254  }
1255 
1256  edge->cell = edge->x.quo + (edge->x.rem >= edge->dy/2);
1257 }
1258 
1259 inline static void
1260 sub_row (struct active_list *active,
1261  struct cell_list *coverages,
1262  unsigned int mask)
1263 {
1264  struct edge *edge = active->head.next;
1265  int xstart = INT_MIN, prev_x = INT_MIN;
1266  int winding = 0;
1267 
1268  cell_list_rewind (coverages);
1269 
1270  while (&active->tail != edge) {
1271  struct edge *next = edge->next;
1272  int xend = edge->cell;
1273 
1274  if (--edge->height_left) {
1275  step (edge);
1276 
1277  if (edge->cell < prev_x) {
1278  struct edge *pos = edge->prev;
1279  pos->next = next;
1280  next->prev = pos;
1281  do {
1282  pos = pos->prev;
1283  } while (edge->cell < pos->cell);
1284  pos->next->prev = edge;
1285  edge->next = pos->next;
1286  edge->prev = pos;
1287  pos->next = edge;
1288  } else
1289  prev_x = edge->cell;
1290  active->min_height = -1;
1291  } else {
1292  edge->prev->next = next;
1293  next->prev = edge->prev;
1294  }
1295 
1296  winding += edge->dir;
1297  if ((winding & mask) == 0) {
1298  if (next->cell != xend) {
1299  cell_list_add_subspan (coverages, xstart, xend);
1300  xstart = INT_MIN;
1301  }
1302  } else if (xstart == INT_MIN)
1303  xstart = xend;
1304 
1305  edge = next;
1306  }
1307 }
1308 
1309 inline static void dec (struct active_list *a, struct edge *e, int h)
1310 {
1311  e->height_left -= h;
1312  if (e->height_left == 0) {
1313  e->prev->next = e->next;
1314  e->next->prev = e->prev;
1315  a->min_height = -1;
1316  }
1317 }
1318 
1319 static void
1320 full_row (struct active_list *active,
1321  struct cell_list *coverages,
1322  unsigned int mask)
1323 {
1324  struct edge *left = active->head.next;
1325 
1326  while (&active->tail != left) {
1327  struct edge *right;
1328  int winding;
1329 
1330  dec (active, left, GRID_Y);
1331 
1332  winding = left->dir;
1333  right = left->next;
1334  do {
1335  dec (active, right, GRID_Y);
1336 
1337  winding += right->dir;
1338  if ((winding & mask) == 0 && right->next->cell != right->cell)
1339  break;
1340 
1341  full_step (right);
1342 
1343  right = right->next;
1344  } while (1);
1345 
1346  cell_list_set_rewind (coverages);
1347  cell_list_render_edge (coverages, left, +1);
1348  cell_list_render_edge (coverages, right, -1);
1349 
1350  left = right->next;
1351  }
1352 }
1353 
1354 static void
1355 _glitter_scan_converter_init(glitter_scan_converter_t *converter, jmp_buf *jmp)
1356 {
1357  polygon_init(converter->polygon, jmp);
1358  active_list_init(converter->active);
1359  cell_list_init(converter->coverages, jmp);
1360  converter->xmin=0;
1361  converter->ymin=0;
1362  converter->xmax=0;
1363  converter->ymax=0;
1364 }
1365 
1366 static void
1367 _glitter_scan_converter_fini(glitter_scan_converter_t *self)
1368 {
1369  if (self->spans != self->spans_embedded)
1370  free (self->spans);
1371 
1372  polygon_fini(self->polygon);
1373  cell_list_fini(self->coverages);
1374 
1375  self->xmin=0;
1376  self->ymin=0;
1377  self->xmax=0;
1378  self->ymax=0;
1379 }
1380 
1381 static grid_scaled_t
1382 int_to_grid_scaled(int i, int scale)
1383 {
1384  /* Clamp to max/min representable scaled number. */
1385  if (i >= 0) {
1386  if (i >= INT_MAX/scale)
1387  i = INT_MAX/scale;
1388  }
1389  else {
1390  if (i <= INT_MIN/scale)
1391  i = INT_MIN/scale;
1392  }
1393  return i*scale;
1394 }
1395 
1396 #define int_to_grid_scaled_x(x) int_to_grid_scaled((x), GRID_X)
1397 #define int_to_grid_scaled_y(x) int_to_grid_scaled((x), GRID_Y)
1398 
1399 I glitter_status_t
1400 glitter_scan_converter_reset(
1401  glitter_scan_converter_t *converter,
1402  int xmin, int ymin,
1403  int xmax, int ymax)
1404 {
1405  glitter_status_t status;
1406  int max_num_spans;
1407 
1408  converter->xmin = 0; converter->xmax = 0;
1409  converter->ymin = 0; converter->ymax = 0;
1410 
1411  max_num_spans = xmax - xmin + 1;
1412 
1413  if (max_num_spans > ARRAY_LENGTH(converter->spans_embedded)) {
1414  converter->spans = _cairo_malloc_ab (max_num_spans,
1415  sizeof (cairo_half_open_span_t));
1416  if (unlikely (converter->spans == NULL))
1417  return _cairo_error (CAIRO_STATUS_NO_MEMORY);
1418  } else
1419  converter->spans = converter->spans_embedded;
1420 
1421  xmin = int_to_grid_scaled_x(xmin);
1422  ymin = int_to_grid_scaled_y(ymin);
1423  xmax = int_to_grid_scaled_x(xmax);
1424  ymax = int_to_grid_scaled_y(ymax);
1425 
1426  active_list_reset(converter->active);
1427  cell_list_reset(converter->coverages);
1428  status = polygon_reset(converter->polygon, ymin, ymax);
1429  if (status)
1430  return status;
1431 
1432  converter->xmin = xmin;
1433  converter->xmax = xmax;
1434  converter->ymin = ymin;
1435  converter->ymax = ymax;
1436  return GLITTER_STATUS_SUCCESS;
1437 }
1438 
1439 /* INPUT_TO_GRID_X/Y (in_coord, out_grid_scaled, grid_scale)
1440  * These macros convert an input coordinate in the client's
1441  * device space to the rasterisation grid.
1442  */
1443 /* Gah.. this bit of ugly defines INPUT_TO_GRID_X/Y so as to use
1444  * shifts if possible, and something saneish if not.
1445  */
1446 #if !defined(INPUT_TO_GRID_Y) && defined(GRID_Y_BITS) && GRID_Y_BITS <= GLITTER_INPUT_BITS
1447 # define INPUT_TO_GRID_Y(in, out) (out) = (in) >> (GLITTER_INPUT_BITS - GRID_Y_BITS)
1448 #else
1449 # define INPUT_TO_GRID_Y(in, out) INPUT_TO_GRID_general(in, out, GRID_Y)
1450 #endif
1451 
1452 #if !defined(INPUT_TO_GRID_X) && defined(GRID_X_BITS) && GRID_X_BITS <= GLITTER_INPUT_BITS
1453 # define INPUT_TO_GRID_X(in, out) (out) = (in) >> (GLITTER_INPUT_BITS - GRID_X_BITS)
1454 #else
1455 # define INPUT_TO_GRID_X(in, out) INPUT_TO_GRID_general(in, out, GRID_X)
1456 #endif
1457 
1458 #define INPUT_TO_GRID_general(in, out, grid_scale) do { \
1459  long long tmp__ = (long long)(grid_scale) * (in); \
1460  tmp__ += 1 << (GLITTER_INPUT_BITS-1); \
1461  tmp__ >>= GLITTER_INPUT_BITS; \
1462  (out) = tmp__; \
1463 } while (0)
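A worked conversion, assuming the usual CAIRO_FIXED_FRAC_BITS of 8 (so GLITTER_INPUT_BITS is 8) and the default GRID_Y of 15; illustrative only:

/* Pixel coordinate 3.5 arrives as in = 3.5 * 256 = 896.  Then
 *   tmp  = 15 * 896 = 13440     (scale to the grid)
 *   tmp += 128                  (add half an input unit to round)
 *   tmp >>= 8                   -> 13568 >> 8 = 53 = round(3.5 * 15)
 * The shift-only INPUT_TO_GRID_X/Y paths skip the rounding and simply
 * drop the surplus fractional bits. */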
1464 
1465 inline static void
1466 polygon_add_edge (struct polygon *polygon,
1467  const cairo_edge_t *edge)
1468 {
1469  struct edge *e;
1470  grid_scaled_y_t ytop, ybot;
1471  const cairo_point_t *p1, *p2;
1472 
1473  INPUT_TO_GRID_Y (edge->top, ytop);
1474  if (ytop < polygon->ymin)
1475  ytop = polygon->ymin;
1476 
1477  INPUT_TO_GRID_Y (edge->bottom, ybot);
1478  if (ybot > polygon->ymax)
1479  ybot = polygon->ymax;
1480 
1481  if (ybot <= ytop)
1482  return;
1483 
1484  e = pool_alloc (polygon->edge_pool.base, sizeof (struct edge));
1485 
1486  e->ytop = ytop;
1487  e->height_left = ybot - ytop;
1488  if (edge->line.p2.y > edge->line.p1.y) {
1489  e->dir = edge->dir;
1490  p1 = &edge->line.p1;
1491  p2 = &edge->line.p2;
1492  } else {
1493  e->dir = -edge->dir;
1494  p1 = &edge->line.p2;
1495  p2 = &edge->line.p1;
1496  }
1497 
1498  if (p2->x == p1->x) {
1499  e->cell = p1->x;
1500  e->x.quo = p1->x;
1501  e->x.rem = 0;
1502  e->dxdy.quo = e->dxdy.rem = 0;
1503  e->dxdy_full.quo = e->dxdy_full.rem = 0;
1504  e->dy = 0;
1505  } else {
1506  int64_t Ex, Ey, tmp;
1507 
1508  Ex = (int64_t)(p2->x - p1->x) * GRID_X;
1509  Ey = (int64_t)(p2->y - p1->y) * GRID_Y * (2 << GLITTER_INPUT_BITS);
1510 
1511  e->dxdy.quo = Ex * (2 << GLITTER_INPUT_BITS) / Ey;
1512  e->dxdy.rem = Ex * (2 << GLITTER_INPUT_BITS) % Ey;
1513 
1514  tmp = (int64_t)(2*ytop + 1) << GLITTER_INPUT_BITS;
1515  tmp -= (int64_t)p1->y * GRID_Y * 2;
1516  tmp *= Ex;
1517  e->x.quo = tmp / Ey;
1518  e->x.rem = tmp % Ey;
1519 
1520 #if GRID_X_BITS == GLITTER_INPUT_BITS
1521  e->x.quo += p1->x;
1522 #else
1523  tmp = (int64_t)p1->x * GRID_X;
1524  e->x.quo += tmp >> GLITTER_INPUT_BITS;
1525  e->x.rem += ((tmp & ((1 << GLITTER_INPUT_BITS) - 1)) * Ey) / (1 << GLITTER_INPUT_BITS);
1526 #endif
1527 
1528  if (e->x.rem < 0) {
1529  e->x.quo--;
1530  e->x.rem += Ey;
1531  } else if (e->x.rem >= Ey) {
1532  e->x.quo++;
1533  e->x.rem -= Ey;
1534  }
1535 
1536  if (e->height_left >= GRID_Y) {
1537  tmp = Ex * (2 * GRID_Y << GLITTER_INPUT_BITS);
1538  e->dxdy_full.quo = tmp / Ey;
1539  e->dxdy_full.rem = tmp % Ey;
1540  } else
1541  e->dxdy_full.quo = e->dxdy_full.rem = 0;
1542 
1543  e->cell = e->x.quo + (e->x.rem >= Ey/2);
1544  e->dy = Ey;
1545  }
1546 
1547  _polygon_insert_edge_into_its_y_bucket (polygon, e);
1548 }
1549 
1550 /* Add a new polygon edge from pixel (x1,y1) to (x2,y2) to the scan
1551  * converter. The coordinates represent pixel positions scaled by
1552  * 2**GLITTER_PIXEL_BITS. If this function fails then the scan
1553  * converter should be reset or destroyed. Dir must be +1 or -1,
1554  * with the latter reversing the orientation of the edge. */
1555 I void
1556 glitter_scan_converter_add_edge (glitter_scan_converter_t *converter,
1557  const cairo_edge_t *edge)
1558 {
1559  polygon_add_edge (converter->polygon, edge);
1560 }
1561 
1562 static void
1563 step_edges (struct active_list *active, int count)
1564 {
1565  struct edge *edge;
1566 
1567  count *= GRID_Y;
1568  for (edge = active->head.next; edge != &active->tail; edge = edge->next) {
1569  edge->height_left -= count;
1570  if (! edge->height_left) {
1571  edge->prev->next = edge->next;
1572  edge->next->prev = edge->prev;
1573  active->min_height = -1;
1574  }
1575  }
1576 }
1577 
1578 static glitter_status_t
1579 blit_a8 (struct cell_list *cells,
1580  cairo_span_renderer_t *renderer,
1581  cairo_half_open_span_t *spans,
1582  int y, int height,
1583  int xmin, int xmax)
1584 {
1585  struct cell *cell = cells->head.next;
1586  int prev_x = xmin, last_x = -1;
1587  int16_t cover = 0, last_cover = 0;
1588  unsigned num_spans;
1589 
1590  if (cell == &cells->tail)
1591  return CAIRO_STATUS_SUCCESS;
1592 
1593  /* Skip cells to the left of the clip region. */
1594  while (cell->x < xmin) {
1595  cover += cell->covered_height;
1596  cell = cell->next;
1597  }
1598  cover *= GRID_X*2;
1599 
1600  /* Form the spans from the coverages and areas. */
1601  num_spans = 0;
1602  for (; cell->x < xmax; cell = cell->next) {
1603  int x = cell->x;
1604  int16_t area;
1605 
1606  if (x > prev_x && cover != last_cover) {
1607  spans[num_spans].x = prev_x;
1608  spans[num_spans].coverage = GRID_AREA_TO_ALPHA (cover);
1609  last_cover = cover;
1610  last_x = prev_x;
1611  ++num_spans;
1612  }
1613 
1614  cover += cell->covered_height*GRID_X*2;
1615  area = cover - cell->uncovered_area;
1616 
1617  if (area != last_cover) {
1618  spans[num_spans].x = x;
1619  spans[num_spans].coverage = GRID_AREA_TO_ALPHA (area);
1620  last_cover = area;
1621  last_x = x;
1622  ++num_spans;
1623  }
1624 
1625  prev_x = x+1;
1626  }
1627 
1628  if (prev_x <= xmax && cover != last_cover) {
1629  spans[num_spans].x = prev_x;
1630  spans[num_spans].coverage = GRID_AREA_TO_ALPHA (cover);
1631  last_cover = cover;
1632  last_x = prev_x;
1633  ++num_spans;
1634  }
1635 
1636  if (last_x < xmax && last_cover) {
1637  spans[num_spans].x = xmax;
1638  spans[num_spans].coverage = 0;
1639  ++num_spans;
1640  }
1641 
1642  /* Dump them into the renderer. */
1643  return renderer->render_rows (renderer, y, height, spans, num_spans);
1644 }
1645 
1646 #define GRID_AREA_TO_A1(A) ((GRID_AREA_TO_ALPHA (A) > 127) ? 255 : 0)
1647 static glitter_status_t
1648 blit_a1 (struct cell_list *cells,
1649  cairo_span_renderer_t *renderer,
1650  cairo_half_open_span_t *spans,
1651  int y, int height,
1652  int xmin, int xmax)
1653 {
1654  struct cell *cell = cells->head.next;
1655  int prev_x = xmin, last_x = -1;
1656  int16_t cover = 0;
1657  uint8_t coverage, last_cover = 0;
1658  unsigned num_spans;
1659 
1660  if (cell == &cells->tail)
1661  return CAIRO_STATUS_SUCCESS;
1662 
1663  /* Skip cells to the left of the clip region. */
1664  while (cell->x < xmin) {
1665  cover += cell->covered_height;
1666  cell = cell->next;
1667  }
1668  cover *= GRID_X*2;
1669 
1670  /* Form the spans from the coverages and areas. */
1671  num_spans = 0;
1672  for (; cell->x < xmax; cell = cell->next) {
1673  int x = cell->x;
1674  int16_t area;
1675 
1676  coverage = GRID_AREA_TO_A1 (cover);
1677  if (x > prev_x && coverage != last_cover) {
1678  last_x = spans[num_spans].x = prev_x;
1679  last_cover = spans[num_spans].coverage = coverage;
1680  ++num_spans;
1681  }
1682 
1683  cover += cell->covered_height*GRID_X*2;
1684  area = cover - cell->uncovered_area;
1685 
1686  coverage = GRID_AREA_TO_A1 (area);
1687  if (coverage != last_cover) {
1688  last_x = spans[num_spans].x = x;
1689  last_cover = spans[num_spans].coverage = coverage;
1690  ++num_spans;
1691  }
1692 
1693  prev_x = x+1;
1694  }
1695 
1696  coverage = GRID_AREA_TO_A1 (cover);
1697  if (prev_x <= xmax && coverage != last_cover) {
1698  last_x = spans[num_spans].x = prev_x;
1699  last_cover = spans[num_spans].coverage = coverage;
1700  ++num_spans;
1701  }
1702 
1703  if (last_x < xmax && last_cover) {
1704  spans[num_spans].x = xmax;
1705  spans[num_spans].coverage = 0;
1706  ++num_spans;
1707  }
1708  if (num_spans == 1)
1709  return CAIRO_STATUS_SUCCESS;
1710 
1711  /* Dump them into the renderer. */
1712  return renderer->render_rows (renderer, y, height, spans, num_spans);
1713 }
1714 
1715 
1716 I void
1717 glitter_scan_converter_render(glitter_scan_converter_t *converter,
1718  unsigned int winding_mask,
1719  int antialias,
1720  cairo_span_renderer_t *renderer)
1721 {
1722  int i, j;
1723  int ymax_i = converter->ymax / GRID_Y;
1724  int ymin_i = converter->ymin / GRID_Y;
1725  int xmin_i, xmax_i;
1726  int h = ymax_i - ymin_i;
1727  struct polygon *polygon = converter->polygon;
1728  struct cell_list *coverages = converter->coverages;
1729  struct active_list *active = converter->active;
1730  struct edge *buckets[GRID_Y] = { 0 };
1731 
1732  xmin_i = converter->xmin / GRID_X;
1733  xmax_i = converter->xmax / GRID_X;
1734  if (xmin_i >= xmax_i)
1735  return;
1736 
1737  /* Render each pixel row. */
1738  for (i = 0; i < h; i = j) {
1739  int do_full_row = 0;
1740 
1741  j = i + 1;
1742 
1743  /* Determine if we can ignore this row or use the full pixel
1744  * stepper. */
1745  if (polygon_fill_buckets (active,
1746  polygon->y_buckets[i],
1747  (i+ymin_i)*GRID_Y,
1748  buckets) == 0) {
1749  if (buckets[0]) {
1750  active_list_merge_edges_from_bucket (active, buckets[0]);
1751  buckets[0] = NULL;
1752  }
1753 
1754  if (active->head.next == &active->tail) {
1755  active->min_height = INT_MAX;
1756  active->is_vertical = 1;
1757  for (; j < h && ! polygon->y_buckets[j]; j++)
1758  ;
1759  continue;
1760  }
1761 
1762  do_full_row = can_do_full_row (active);
1763  }
1764 
1765  if (do_full_row) {
1766  /* Step by a full pixel row's worth. */
1767  full_row (active, coverages, winding_mask);
1768 
1769  if (active->is_vertical) {
1770  while (j < h &&
1771  polygon->y_buckets[j] == NULL &&
1772  active->min_height >= 2*GRID_Y)
1773  {
1774  active->min_height -= GRID_Y;
1775  j++;
1776  }
1777  if (j != i + 1)
1778  step_edges (active, j - (i + 1));
1779  }
1780  } else {
1781  int sub;
1782 
1783  /* Subsample this row. */
1784  for (sub = 0; sub < GRID_Y; sub++) {
1785  if (buckets[sub]) {
1786  active_list_merge_edges_from_bucket (active, buckets[sub]);
1787  buckets[sub] = NULL;
1788  }
1789  sub_row (active, coverages, winding_mask);
1790  }
1791  }
1792 
1793  if (antialias)
1794  blit_a8 (coverages, renderer, converter->spans,
1795  i+ymin_i, j-i, xmin_i, xmax_i);
1796  else
1797  blit_a1 (coverages, renderer, converter->spans,
1798  i+ymin_i, j-i, xmin_i, xmax_i);
1799  cell_list_reset (coverages);
1800 
1801  active->min_height -= GRID_Y;
1802  }
1803 }
1804 
1805 struct _cairo_tor_scan_converter {
1806  cairo_scan_converter_t base;
1807 
1808  glitter_scan_converter_t converter[1];
1809  cairo_fill_rule_t fill_rule;
1810  cairo_antialias_t antialias;
1811 
1812  jmp_buf jmp;
1813 };
1814 
1815 typedef struct _cairo_tor_scan_converter cairo_tor_scan_converter_t;
1816 
1817 static void
1818 _cairo_tor_scan_converter_destroy (void *converter)
1819 {
1820  cairo_tor_scan_converter_t *self = converter;
1821  if (self == NULL) {
1822  return;
1823  }
1824  _glitter_scan_converter_fini (self->converter);
1825  free(self);
1826 }
1827 
1828 cairo_status_t
1829 _cairo_tor_scan_converter_add_polygon (void *converter,
1830  const cairo_polygon_t *polygon)
1831 {
1832  cairo_tor_scan_converter_t *self = converter;
1833  int i;
1834 
1835 #if 0
1836  FILE *file = fopen ("polygon.txt", "w");
1837  _cairo_debug_print_polygon (file, polygon);
1838  fclose (file);
1839 #endif
1840 
1841  for (i = 0; i < polygon->num_edges; i++)
1842  glitter_scan_converter_add_edge (self->converter, &polygon->edges[i]);
1843 
1844  return CAIRO_STATUS_SUCCESS;
1845 }
1846 
1847 static cairo_status_t
1848 _cairo_tor_scan_converter_generate (void *converter,
1849  cairo_span_renderer_t *renderer)
1850 {
1851  cairo_tor_scan_converter_t *self = converter;
1852  cairo_status_t status;
1853 
1854  if ((status = setjmp (self->jmp)))
1855  return _cairo_scan_converter_set_error (self, _cairo_error (status));
1856 
1857  glitter_scan_converter_render (self->converter,
1858  self->fill_rule == CAIRO_FILL_RULE_WINDING ? ~0 : 1,
1859  self->antialias != CAIRO_ANTIALIAS_NONE,
1860  renderer);
1861  return CAIRO_STATUS_SUCCESS;
1862 }
1863 
1864 cairo_scan_converter_t *
1865 _cairo_tor_scan_converter_create (int xmin,
1866  int ymin,
1867  int xmax,
1868  int ymax,
1869  cairo_fill_rule_t fill_rule,
1870  cairo_antialias_t antialias)
1871 {
1872  cairo_tor_scan_converter_t *self;
1873  cairo_status_t status;
1874 
1875  self = _cairo_malloc (sizeof(struct _cairo_tor_scan_converter));
1876  if (unlikely (self == NULL)) {
1877  status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
1878  goto bail_nomem;
1879  }
1880 
1881  self->base.destroy = _cairo_tor_scan_converter_destroy;
1882  self->base.generate = _cairo_tor_scan_converter_generate;
1883 
1884  _glitter_scan_converter_init (self->converter, &self->jmp);
1885  status = glitter_scan_converter_reset (self->converter,
1886  xmin, ymin, xmax, ymax);
1887  if (unlikely (status))
1888  goto bail;
1889 
1890  self->fill_rule = fill_rule;
1891  self->antialias = antialias;
1892 
1893  return &self->base;
1894 
1895  bail:
1896  self->base.destroy(&self->base);
1897  bail_nomem:
1898  return _cairo_scan_converter_create_in_error (status);
1899 }
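To close the loop, here is a hedged sketch of how a caller might drive the finished converter through its cairo_scan_converter_t vtable; the width, height, polygon and renderer are assumed to come from the surrounding compositor, and error checking is abbreviated:

cairo_scan_converter_t *c;
cairo_status_t status;

c = _cairo_tor_scan_converter_create (0, 0, width, height,
                                      CAIRO_FILL_RULE_WINDING,
                                      CAIRO_ANTIALIAS_DEFAULT);

status = _cairo_tor_scan_converter_add_polygon (c, &polygon);
if (status == CAIRO_STATUS_SUCCESS)
    status = c->generate (c, renderer);

c->destroy (c);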