w32tex
About: w32tex contains the Windows sources of TeX Live, a comprehensive TeX system that includes all the major TeX-related programs, macro packages, and fonts that are free software.
Fossies Dox: w32tex-src.tar.xz (unofficial and experimental doxygen-generated source code documentation)

cairo-tor22-scan-converter.c
1 /* -*- Mode: c; tab-width: 8; c-basic-offset: 4; indent-tabs-mode: t; -*- */
2 /* glitter-paths - polygon scan converter
3  *
4  * Copyright (c) 2008 M Joonas Pihlaja
5  * Copyright (c) 2007 David Turner
6  *
7  * Permission is hereby granted, free of charge, to any person
8  * obtaining a copy of this software and associated documentation
9  * files (the "Software"), to deal in the Software without
10  * restriction, including without limitation the rights to use,
11  * copy, modify, merge, publish, distribute, sublicense, and/or sell
12  * copies of the Software, and to permit persons to whom the
13  * Software is furnished to do so, subject to the following
14  * conditions:
15  *
16  * The above copyright notice and this permission notice shall be
17  * included in all copies or substantial portions of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
21  * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
22  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
23  * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
24  * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
25  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26  * OTHER DEALINGS IN THE SOFTWARE.
27  */
28 /* This is the Glitter paths scan converter incorporated into cairo.
29  * The source is from commit 734c53237a867a773640bd5b64816249fa1730f8
30  * of
31  *
32  * https://gitweb.freedesktop.org/?p=users/joonas/glitter-paths
33  */
34 /* Glitter-paths is a stand-alone polygon rasteriser derived from
35  * David Turner's reimplementation of Tor Andersson's 15x17
36  * supersampling rasteriser from the Apparition graphics library. The
37  * main new feature here is cheaply choosing per-scan line between
38  * doing fully analytical coverage computation for an entire row at a
39  * time vs. using a supersampling approach.
40  *
41  * David Turner's code can be found at
42  *
43  * http://david.freetype.org/rasterizer-shootout/raster-comparison-20070813.tar.bz2
44  *
45  * In particular this file incorporates large parts of ftgrays_tor10.h
46  * from raster-comparison-20070813.tar.bz2
47  */
48 /* Overview
49  *
50  * A scan converter's basic purpose is to take polygon edges and convert
51  * them into an RLE compressed A8 mask. This one works in two phases:
52  * gathering edges and generating spans.
53  *
54  * 1) As the user feeds the scan converter edges they are vertically
55  * clipped and bucketed into a _polygon_ data structure. The edges
56  * are also snapped from the user's coordinates to the subpixel grid
57  * coordinates used during scan conversion.
58  *
59  * user
60  * |
61  * | edges
62  * V
63  * polygon buckets
64  *
65  * 2) Generating spans works by performing a vertical sweep of pixel
66  * rows from top to bottom and maintaining an _active_list_ of edges
67  * that intersect the row. From the active list the fill rule
68  * determines which edges are the left and right edges of the start of
69  * each span, and their contribution is then accumulated into a pixel
70  * coverage list (_cell_list_) as coverage deltas. Once the coverage
71  * deltas of all edges are known we can form spans of constant pixel
72  * coverage by summing the deltas during a traversal of the cell list.
73  * At the end of a pixel row the cell list is sent to a coverage
74  * blitter for rendering to some target surface.
75  *
76  * The pixel coverages are computed by either supersampling the row
77  * and box filtering a mono rasterisation, or by computing the exact
78  * coverages of edges in the active list. The supersampling method is
79  * used whenever some edge starts or stops within the row or there are
80  * edge intersections in the row.
81  *
82  *   polygon bucket for       \
83  *   current pixel row         |
84  *            |                |
85  *            | activate new edges    |  Repeat GRID_Y times if we
86  *            V                \         are supersampling this row,
87  *   active list               /         or just once if we're computing
88  *            |                |         analytical coverage.
89  *            | coverage deltas       |
90  *            V                |
91  *   pixel coverage list      /
92  *            |
93  *            V
94  *   coverage blitter
95  */
96 #include "cairoint.h"
97 #include "cairo-spans-private.h"
98 #include "cairo-error-private.h"
99 
100 #include <stdlib.h>
101 #include <string.h>
102 #include <limits.h>
103 #include <setjmp.h>
104 
105 /*-------------------------------------------------------------------------
106  * cairo specific config
107  */
108 #define I static
109 
110 /* Prefer cairo's status type. */
111 #define GLITTER_HAVE_STATUS_T 1
112 #define GLITTER_STATUS_SUCCESS CAIRO_STATUS_SUCCESS
113 #define GLITTER_STATUS_NO_MEMORY CAIRO_STATUS_NO_MEMORY
114 typedef cairo_status_t glitter_status_t;
115 
116 /* The input coordinate scale and the rasterisation grid scales. */
117 #define GLITTER_INPUT_BITS CAIRO_FIXED_FRAC_BITS
118 //#define GRID_X_BITS CAIRO_FIXED_FRAC_BITS
119 //#define GRID_Y 15
120 #define GRID_X_BITS 2
121 #define GRID_Y_BITS 2
122 
123 /* Set glitter up to use a cairo span renderer to do the coverage
124  * blitting. */
125 struct pool;
126 struct cell_list;
127 
128 /*-------------------------------------------------------------------------
129  * glitter-paths.h
130  */
131 
132 /* "Input scaled" numbers are fixed precision reals with multiplier
133  * 2**GLITTER_INPUT_BITS. Input coordinates are given to glitter as
134  * pixel scaled numbers. These get converted to the internal grid
135  * scaled numbers as soon as possible. Internal overflow is possible
136  * if GRID_X/Y inside glitter-paths.c is larger than
137  * 1<<GLITTER_INPUT_BITS. */
138 #ifndef GLITTER_INPUT_BITS
139 # define GLITTER_INPUT_BITS 8
140 #endif
141 #define GLITTER_INPUT_SCALE (1<<GLITTER_INPUT_BITS)
142 typedef int glitter_input_scaled_t;
143 
144 #if !GLITTER_HAVE_STATUS_T
145 typedef enum {
146  GLITTER_STATUS_SUCCESS = 0,
147  GLITTER_STATUS_NO_MEMORY
148 } glitter_status_t;
149 #endif
150 
151 #ifndef I
152 # define I /*static*/
153 #endif
154 
155 /* Opaque type for scan converting. */
156 typedef struct glitter_scan_converter glitter_scan_converter_t;
157 
158 /* Reset a scan converter to accept polygon edges and set the clip box
159  * in pixels. Allocates O(ymax-ymin) bytes of memory. The clip box
160  * is set to integer pixel coordinates xmin <= x < xmax, ymin <= y <
161  * ymax. */
162 I glitter_status_t
163 glitter_scan_converter_reset(
164  glitter_scan_converter_t *converter,
165  int xmin, int ymin,
166  int xmax, int ymax);
167 
168 /* Render the polygon in the scan converter to the given A8 format
169  * image raster. Only the pixels accessible as pixels[y*stride+x] for
170  * x,y inside the clip box are written to, where xmin <= x < xmax,
171  * ymin <= y < ymax. The image is assumed to be clear on input.
172  *
173  * If nonzero_fill is true then the interior of the polygon is
174  * computed with the non-zero fill rule. Otherwise the even-odd fill
175  * rule is used.
176  *
177  * The scan converter must be reset or destroyed after this call. */
178 
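/* A minimal usage sketch of the API above (an illustration added for
 * this documentation, not part of the upstream file; it assumes the
 * edges and the span renderer are supplied by the surrounding cairo
 * machinery): */
#if 0
static cairo_status_t
example_rasterise (glitter_scan_converter_t *converter,
                   const cairo_edge_t *edges, int num_edges,
                   cairo_span_renderer_t *renderer)
{
    int i;

    /* Set a 256x256 pixel clip box; allocates O(ymax-ymin) memory. */
    cairo_status_t status =
        glitter_scan_converter_reset (converter, 0, 0, 256, 256);
    if (status)
        return status;

    /* Phase 1: edges are snapped to the grid and bucketed by their
     * top y coordinate. */
    for (i = 0; i < num_edges; i++)
        glitter_scan_converter_add_edge (converter, &edges[i]);

    /* Phase 2: sweep the rows top to bottom; ~0 selects the non-zero
     * winding rule (1 selects even-odd) and antialias != 0 emits A8
     * coverage spans. */
    glitter_scan_converter_render (converter, ~0, 1, renderer);
    return CAIRO_STATUS_SUCCESS;
}
#endif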
179 /*-------------------------------------------------------------------------
180  * glitter-paths.c: Implementation internal types
181  */
182 #include <stdlib.h>
183 #include <string.h>
184 #include <limits.h>
185 
186 /* All polygon coordinates are snapped onto a subsample grid. "Grid
187  * scaled" numbers are fixed precision reals with multiplier GRID_X or
188  * GRID_Y. */
189 typedef int grid_scaled_t;
190 typedef int grid_scaled_x_t;
191 typedef int grid_scaled_y_t;
192 
193 /* Default x/y scale factors.
194  * You can either define GRID_X/Y_BITS to get a power-of-two scale
195  * or define GRID_X/Y separately. */
196 #if !defined(GRID_X) && !defined(GRID_X_BITS)
197 # define GRID_X_BITS 8
198 #endif
199 #if !defined(GRID_Y) && !defined(GRID_Y_BITS)
200 # define GRID_Y 15
201 #endif
202 
203 /* Use GRID_X/Y_BITS to define GRID_X/Y if they're available. */
204 #ifdef GRID_X_BITS
205 # define GRID_X (1 << GRID_X_BITS)
206 #endif
207 #ifdef GRID_Y_BITS
208 # define GRID_Y (1 << GRID_Y_BITS)
209 #endif
210 
211 /* The GRID_X_TO_INT_FRAC macro splits a grid scaled coordinate into
212  * integer and fractional parts. The integer part is floored. */
213 #if defined(GRID_X_TO_INT_FRAC)
214  /* do nothing */
215 #elif defined(GRID_X_BITS)
216 # define GRID_X_TO_INT_FRAC(x, i, f) \
217  _GRID_TO_INT_FRAC_shift(x, i, f, GRID_X_BITS)
218 #else
219 # define GRID_X_TO_INT_FRAC(x, i, f) \
220  _GRID_TO_INT_FRAC_general(x, i, f, GRID_X)
221 #endif
222 
223 #define _GRID_TO_INT_FRAC_general(t, i, f, m) do { \
224  (i) = (t) / (m); \
225  (f) = (t) % (m); \
226  if ((f) < 0) { \
227  --(i); \
228  (f) += (m); \
229  } \
230 } while (0)
231 
232 #define _GRID_TO_INT_FRAC_shift(t, i, f, b) do { \
233  (f) = (t) & ((1 << (b)) - 1); \
234  (i) = (t) >> (b); \
235 } while (0)
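/* Worked example (with this file's GRID_X_BITS == 2, GRID_X == 4):
 * the shift variant splits grid scaled x == 9 into i == 2, f == 1
 * (pixel 2, subpixel 1/4), and x == -3 into i == -1, f == 1.  The
 * integer part is floored, so the fraction always lands in
 * [0, GRID_X). */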
236 
237 /* A grid area is a real in [0,1] scaled by 2*GRID_X*GRID_Y. We want
238  * to be able to represent exactly areas of subpixel trapezoids whose
239  * vertices are given in grid scaled coordinates. The scale factor
240  * comes from needing to accurately represent the area 0.5*dx*dy of a
241  * triangle with base dx and height dy in grid scaled numbers. */
242 #define GRID_XY (2*GRID_X*GRID_Y) /* Unit area on the grid. */
243 
244 /* GRID_AREA_TO_ALPHA(area): map [0,GRID_XY] to [0,255]. */
245 #if GRID_XY == 510
246 # define GRID_AREA_TO_ALPHA(c) (((c)+1) >> 1)
247 #elif GRID_XY == 255
248 # define GRID_AREA_TO_ALPHA(c) (c)
249 #elif GRID_XY == 64
250 # define GRID_AREA_TO_ALPHA(c) (((c) << 2) | -(((c) & 0x40) >> 6))
251 #elif GRID_XY == 32
252 # define GRID_AREA_TO_ALPHA(c) (((c) << 3) | -(((c) & 0x20) >> 5))
253 #elif GRID_XY == 128
254 # define GRID_AREA_TO_ALPHA(c) ((((c) << 1) | -((c) >> 7)) & 255)
255 #elif GRID_XY == 256
256 # define GRID_AREA_TO_ALPHA(c) (((c) | -((c) >> 8)) & 255)
257 #elif GRID_XY == 15
258 # define GRID_AREA_TO_ALPHA(c) (((c) << 4) + (c))
259 #elif GRID_XY == 2*256*15
260 # define GRID_AREA_TO_ALPHA(c) (((c) + ((c)<<4) + 256) >> 9)
261 #else
262 # define GRID_AREA_TO_ALPHA(c) (((c)*255 + GRID_XY/2) / GRID_XY)
263 #endif
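/* With the GRID_X_BITS == GRID_Y_BITS == 2 configuration above,
 * GRID_XY == 2*4*4 == 32, so the branch used here is
 * (c << 3) | -((c & 0x20) >> 5): half coverage 16 maps to alpha 128,
 * and full coverage 32 maps to 255, since without the OR the value
 * 32 << 3 == 256 would truncate to 0 in 8 bits. */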
264 
265 #define UNROLL3(x) x x x
266 
267 struct quorem {
268  int32_t quo;
269  int32_t rem;
270 };
271 
272 /* Header for a chunk of memory in a memory pool. */
273 struct _pool_chunk {
274  /* # bytes used in this chunk. */
275  size_t size;
276 
277  /* # bytes total in this chunk */
278  size_t capacity;
279 
280  /* Pointer to the previous chunk or %NULL if this is the sentinel
281  * chunk in the pool header. */
282  struct _pool_chunk *prev_chunk;
283 
284  /* Actual data starts here. Well aligned for pointers. */
285 };
286 
287 /* A memory pool. This is supposed to be embedded on the stack or
288  * within some other structure. It may optionally be followed by an
289  * embedded array from which requests are fulfilled until
290  * malloc needs to be called to allocate a first real chunk. */
291 struct pool {
292  /* Chunk we're allocating from. */
293  struct _pool_chunk *current;
294 
295  jmp_buf *jmp;
296 
297  /* Free list of previously allocated chunks. All have >= default
298  * capacity. */
299  struct _pool_chunk *first_free;
300 
301  /* The default capacity of a chunk. */
302  size_t default_capacity;
303 
304  /* Header for the sentinel chunk. Directly following the pool
305  * struct should be some space for embedded elements from which
306  * the sentinel chunk allocates. */
307  struct _pool_chunk sentinel[1];
308 };
309 
310 /* A polygon edge. */
311 struct edge {
312  /* Next in y-bucket or active list. */
313  struct edge *next, *prev;
314 
315  /* Number of subsample rows remaining to scan convert of this
316  * edge. */
317  grid_scaled_y_t height_left;
318 
319  /* Original sign of the edge: +1 for downwards, -1 for upwards
320  * edges. */
321  int dir;
322  int vertical;
323 
324  /* Current x coordinate while the edge is on the active
325  * list. Initialised to the x coordinate of the top of the
326  * edge. The quotient is in grid_scaled_x_t units and the
327  * remainder is mod dy in grid_scaled_y_t units.*/
328  struct quorem x;
329 
330  /* Advance of the current x when moving down a subsample line. */
331  struct quorem dxdy;
332 
333  /* The clipped y of the top of the edge. */
334  grid_scaled_y_t ytop;
335 
336  /* y2-y1 after orienting the edge downwards. */
337  grid_scaled_y_t dy;
338 };
339 
340 #define EDGE_Y_BUCKET_INDEX(y, ymin) (((y) - (ymin))/GRID_Y)
341 
342 /* A collection of sorted and vertically clipped edges of the polygon.
343  * Edges are moved from the polygon to an active list while scan
344  * converting. */
345 struct polygon {
346  /* The vertical clip extents. */
347  grid_scaled_y_t ymin, ymax;
348 
349  /* Array of edges all starting in the same bucket. An edge is put
350  * into bucket EDGE_BUCKET_INDEX(edge->ytop, polygon->ymin) when
351  * it is added to the polygon. */
352  struct edge **y_buckets;
353  struct edge *y_buckets_embedded[64];
354 
355  struct {
356  struct pool base[1];
357  struct edge embedded[32];
358  } edge_pool;
359 };
360 
361 /* A cell records the effect on pixel coverage of polygon edges
362  * passing through a pixel. It contains two accumulators of pixel
363  * coverage.
364  *
365  * Consider the effects of a polygon edge on the coverage of a pixel
366  * it intersects and that of the following one. The coverage of the
367  * following pixel is the height of the edge multiplied by the width
368  * of the pixel, and the coverage of the pixel itself is the area of
369  * the trapezoid formed by the edge and the right side of the pixel.
370  *
371  * +-----------------------+-----------------------+
372  * | | |
373  * | | |
374  * |_______________________|_______________________|
375  * | \...................|.......................|\
376  * | \..................|.......................| |
377  * | \.................|.......................| |
378  * | \....covered.....|.......................| |
379  * | \....area.......|.......................| } covered height
380  * | \..............|.......................| |
381  * |uncovered\.............|.......................| |
382  * | area \............|.......................| |
383  * |___________\...........|.......................|/
384  * | | |
385  * | | |
386  * | | |
387  * +-----------------------+-----------------------+
388  *
389  * Since the coverage of the following pixel will always be a multiple
390  * of the width of the pixel, we can store the height of the covered
391  * area instead. The coverage of the pixel itself is the total
392  * coverage minus the area of the uncovered area to the left of the
393  * edge. As it's faster to compute the uncovered area we only store
394  * that and subtract it from the total coverage later when forming
395  * spans to blit.
396  *
397  * The heights and areas are signed, with left edges of the polygon
398  * having positive sign and right edges having negative sign. When
399  * two edges intersect they swap their left/rightness so their
400  * contribution above and below the intersection point must be
401  * computed separately. */
402 struct cell {
403  struct cell *next;
404  int x;
405  int16_t uncovered_area;
406  int16_t covered_height;
407 };
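/* Worked example (using this file's GRID_X == GRID_Y == 4): a left
 * edge spanning the full pixel height at subpixel fraction fx == 1
 * records covered_height == +4 and uncovered_area == +2*1*4 == +8.
 * At blit time the running cover becomes 4*2*GRID_X == 32, and this
 * pixel's own coverage is 32 - 8 == 24 of GRID_XY == 32, i.e. the
 * 3/4 of the pixel lying to the right of the edge; pixels further
 * right receive the full 32. */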
408 
409 /* A cell list represents the scan line sparsely as cells ordered by
410  * ascending x. It is geared towards scanning the cells in order
411  * using an internal cursor. */
412 struct cell_list {
413  /* Sentinel nodes */
414  struct cell head, tail;
415 
416  /* Cursor state for iterating through the cell list. */
417  struct cell *cursor, *rewind;
418 
419  /* Cells in the cell list are owned by the cell list and are
420  * allocated from this pool. */
421  struct {
422  struct pool base[1];
423  struct cell embedded[32];
424  } cell_pool;
425 };
426 
427 struct cell_pair {
428  struct cell *cell1;
429  struct cell *cell2;
430 };
431 
432 /* The active list contains edges in the current scan line ordered by
433  * the x-coordinate of the intercept of the edge and the scan line. */
434 struct active_list {
435  /* Leftmost edge on the current scan line. */
436  struct edge head, tail;
437 
438  /* A lower bound on the height of the active edges is used to
439  * estimate how soon some active edge ends. We can't advance the
440  * scan conversion by a full pixel row if an edge ends somewhere
441  * within it. */
442  grid_scaled_y_t min_height;
443  int is_vertical;
444 };
445 
446 struct glitter_scan_converter {
447  struct polygon polygon[1];
448  struct active_list active[1];
449  struct cell_list coverages[1];
450 
451  cairo_half_open_span_t *spans;
452  cairo_half_open_span_t spans_embedded[64];
453 
454  /* Clip box. */
455  grid_scaled_x_t xmin, xmax;
456  grid_scaled_y_t ymin, ymax;
457 };
458 
459 /* Compute the floored division a/b. Assumes / and % perform symmetric
460  * division. */
461 inline static struct quorem
462 floored_divrem(int a, int b)
463 {
464  struct quorem qr;
465  qr.quo = a/b;
466  qr.rem = a%b;
467  if ((a^b)<0 && qr.rem) {
468  qr.quo -= 1;
469  qr.rem += b;
470  }
471  return qr;
472 }
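/* E.g. floored_divrem(-7, 4) yields quo == -2, rem == 1 (since
 * -7 == -2*4 + 1), whereas C's symmetric division alone would give
 * -1 remainder -3.  Keeping the remainder non-negative is what the
 * edge-stepping DDA below relies on. */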
473 
474 /* Compute the floored division (x*a)/b. Assumes / and % perform symmetric
475  * division. */
476 static struct quorem
477 floored_muldivrem(int x, int a, int b)
478 {
479  struct quorem qr;
480  long long xa = (long long)x*a;
481  qr.quo = xa/b;
482  qr.rem = xa%b;
483  if ((xa>=0) != (b>=0) && qr.rem) {
484  qr.quo -= 1;
485  qr.rem += b;
486  }
487  return qr;
488 }
489 
490 static struct _pool_chunk *
491 _pool_chunk_init(
492  struct _pool_chunk *p,
493  struct _pool_chunk *prev_chunk,
494  size_t capacity)
495 {
496  p->prev_chunk = prev_chunk;
497  p->size = 0;
498  p->capacity = capacity;
499  return p;
500 }
501 
502 static struct _pool_chunk *
503 _pool_chunk_create(struct pool *pool, size_t size)
504 {
505  struct _pool_chunk *p;
506 
507  p = _cairo_malloc (size + sizeof(struct _pool_chunk));
508  if (unlikely (NULL == p))
509  longjmp (*pool->jmp, _cairo_error (CAIRO_STATUS_NO_MEMORY));
510 
511  return _pool_chunk_init(p, pool->current, size);
512 }
513 
514 static void
515 pool_init(struct pool *pool,
516  jmp_buf *jmp,
517  size_t default_capacity,
518  size_t embedded_capacity)
519 {
520  pool->jmp = jmp;
521  pool->current = pool->sentinel;
522  pool->first_free = NULL;
523  pool->default_capacity = default_capacity;
524  _pool_chunk_init(pool->sentinel, NULL, embedded_capacity);
525 }
526 
527 static void
528 pool_fini (struct pool *pool)
529 {
530  struct _pool_chunk *p = pool->current;
531  do {
532  while (NULL != p) {
533  struct _pool_chunk *prev = p->prev_chunk;
534  if (p != pool->sentinel)
535  free(p);
536  p = prev;
537  }
538  p = pool->first_free;
539  pool->first_free = NULL;
540  } while (NULL != p);
541 }
542 
543 /* Satisfy an allocation by first allocating a new large enough chunk
544  * and adding it to the head of the pool's chunk list. This function
545  * is called as a fallback if pool_alloc() couldn't do a quick
546  * allocation from the current chunk in the pool. */
547 static void *
548 _pool_alloc_from_new_chunk(
549  struct pool *pool,
550  size_t size)
551 {
552  struct _pool_chunk *chunk;
553  void *obj;
554  size_t capacity;
555 
556  /* If the allocation is smaller than the default chunk size then
557  * try getting a chunk off the free list. Force alloc of a new
558  * chunk for large requests. */
559  capacity = size;
560  chunk = NULL;
561  if (size < pool->default_capacity) {
562  capacity = pool->default_capacity;
563  chunk = pool->first_free;
564  if (chunk) {
565  pool->first_free = chunk->prev_chunk;
566  _pool_chunk_init(chunk, pool->current, chunk->capacity);
567  }
568  }
569 
570  if (NULL == chunk)
571  chunk = _pool_chunk_create (pool, capacity);
572  pool->current = chunk;
573 
574  obj = ((unsigned char*)chunk + sizeof(*chunk) + chunk->size);
575  chunk->size += size;
576  return obj;
577 }
578 
579 /* Allocate size bytes from the pool. The first allocated address
580  * returned from a pool is aligned to sizeof(void*). Subsequent
581  * addresses will maintain alignment as long as multiples of void* are
582  * allocated. Returns the address of a new memory area or %NULL on
583  * allocation failures. The pool retains ownership of the returned
584  * memory. */
585 inline static void *
586 pool_alloc (struct pool *pool, size_t size)
587 {
588  struct _pool_chunk *chunk = pool->current;
589 
590  if (size <= chunk->capacity - chunk->size) {
591  void *obj = ((unsigned char*)chunk + sizeof(*chunk) + chunk->size);
592  chunk->size += size;
593  return obj;
594  } else {
595  return _pool_alloc_from_new_chunk(pool, size);
596  }
597 }
598 
599 /* Relinquish all pool_alloced memory back to the pool. */
600 static void
601 pool_reset (struct pool *pool)
602 {
603  /* Transfer all used chunks to the chunk free list. */
604  struct _pool_chunk *chunk = pool->current;
605  if (chunk != pool->sentinel) {
606  while (chunk->prev_chunk != pool->sentinel) {
607  chunk = chunk->prev_chunk;
608  }
609  chunk->prev_chunk = pool->first_free;
610  pool->first_free = pool->current;
611  }
612  /* Reset the sentinel as the current chunk. */
613  pool->current = pool->sentinel;
614  pool->sentinel->size = 0;
615 }
616 
617 /* Rewinds the cell list's cursor to the beginning. After rewinding
618  * we're good to cell_list_find() the cell at any x coordinate. */
619 inline static void
620 cell_list_rewind (struct cell_list *cells)
621 {
622  cells->cursor = &cells->head;
623 }
624 
625 inline static void
626 cell_list_maybe_rewind (struct cell_list *cells, int x)
627 {
628  if (x < cells->cursor->x) {
629  cells->cursor = cells->rewind;
630  if (x < cells->cursor->x)
631  cells->cursor = &cells->head;
632  }
633 }
634 
635 inline static void
636 cell_list_set_rewind (struct cell_list *cells)
637 {
638  cells->rewind = cells->cursor;
639 }
640 
641 static void
642 cell_list_init(struct cell_list *cells, jmp_buf *jmp)
643 {
644  pool_init(cells->cell_pool.base, jmp,
645  256*sizeof(struct cell),
646  sizeof(cells->cell_pool.embedded));
647  cells->tail.next = NULL;
648  cells->tail.x = INT_MAX;
649  cells->head.x = INT_MIN;
650  cells->head.next = &cells->tail;
651  cell_list_rewind (cells);
652 }
653 
654 static void
655 cell_list_fini(struct cell_list *cells)
656 {
657  pool_fini (cells->cell_pool.base);
658 }
659 
660 /* Empty the cell list. This is called at the start of every pixel
661  * row. */
662 inline static void
663 cell_list_reset (struct cell_list *cells)
664 {
665  cell_list_rewind (cells);
666  cells->head.next = &cells->tail;
667  pool_reset (cells->cell_pool.base);
668 }
669 
670 inline static struct cell *
671 cell_list_alloc (struct cell_list *cells,
672  struct cell *tail,
673  int x)
674 {
675  struct cell *cell;
676 
677  cell = pool_alloc (cells->cell_pool.base, sizeof (struct cell));
678  cell->next = tail->next;
679  tail->next = cell;
680  cell->x = x;
681  *(uint32_t *)&cell->uncovered_area = 0;
682 
683  return cell;
684 }
685 
686 /* Find a cell at the given x-coordinate. Returns %NULL if a new cell
687  * needed to be allocated but couldn't be. Cells must be found with
688  * non-decreasing x-coordinate until the cell list is rewound using
689  * cell_list_rewind(). Ownership of the returned cell is retained by
690  * the cell list. */
691 inline static struct cell *
692 cell_list_find (struct cell_list *cells, int x)
693 {
694  struct cell *tail = cells->cursor;
695 
696  if (tail->x == x)
697  return tail;
698 
699  while (1) {
700  UNROLL3({
701  if (tail->next->x > x)
702  break;
703  tail = tail->next;
704  });
705  }
706 
707  if (tail->x != x)
708  tail = cell_list_alloc (cells, tail, x);
709  return cells->cursor = tail;
710 
711 }
712 
713 /* Find two cells at x1 and x2. This is exactly equivalent
714  * to
715  *
716  * pair.cell1 = cell_list_find(cells, x1);
717  * pair.cell2 = cell_list_find(cells, x2);
718  *
719  * except with less function call overhead. */
720 inline static struct cell_pair
721 cell_list_find_pair(struct cell_list *cells, int x1, int x2)
722 {
723  struct cell_pair pair;
724 
725  pair.cell1 = cells->cursor;
726  while (1) {
727  UNROLL3({
728  if (pair.cell1->next->x > x1)
729  break;
730  pair.cell1 = pair.cell1->next;
731  });
732  }
733  if (pair.cell1->x != x1)
734  pair.cell1 = cell_list_alloc (cells, pair.cell1, x1);
735 
736  pair.cell2 = pair.cell1;
737  while (1) {
738  UNROLL3({
739  if (pair.cell2->next->x > x2)
740  break;
741  pair.cell2 = pair.cell2->next;
742  });
743  }
744  if (pair.cell2->x != x2)
745  pair.cell2 = cell_list_alloc (cells, pair.cell2, x2);
746 
747  cells->cursor = pair.cell2;
748  return pair;
749 }
750 
751 /* Add a subpixel span covering [x1, x2) to the coverage cells. */
752 inline static void
753 cell_list_add_subspan(struct cell_list *cells,
754  grid_scaled_x_t x1,
755  grid_scaled_x_t x2)
756 {
757  int ix1, fx1;
758  int ix2, fx2;
759 
760  if (x1 == x2)
761  return;
762 
763  GRID_X_TO_INT_FRAC(x1, ix1, fx1);
764  GRID_X_TO_INT_FRAC(x2, ix2, fx2);
765 
766  if (ix1 != ix2) {
767  struct cell_pair p;
768  p = cell_list_find_pair(cells, ix1, ix2);
769  p.cell1->uncovered_area += 2*fx1;
770  ++p.cell1->covered_height;
771  p.cell2->uncovered_area -= 2*fx2;
772  --p.cell2->covered_height;
773  } else {
774  struct cell *cell = cell_list_find(cells, ix1);
775  cell->uncovered_area += 2*(fx1-fx2);
776  }
777 }
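/* Illustrative trace (with this file's GRID_X == 4): the subspan
 * [6, 13) splits into ix1 == 1, fx1 == 2 and ix2 == 3, fx2 == 1.
 * Cell 1 receives uncovered_area += 4 and covered_height += 1,
 * cell 3 receives uncovered_area -= 2 and covered_height -= 1;
 * summing the deltas at blit time gives the interior pixel 2 the
 * full-width contribution 2*GRID_X == 8 for this subsample row. */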
778 
779 /* Adds the analytical coverage of an edge crossing the current pixel
780  * row to the coverage cells and advances the edge's x position to the
781  * following row.
782  *
783  * This function is only called when we know that during this pixel row:
784  *
785  * 1) The relative order of all edges on the active list doesn't
786  * change. In particular, no edges intersect within this row to pixel
787  * precision.
788  *
789  * 2) No new edges start in this row.
790  *
791  * 3) No existing edges end mid-row.
792  *
793  * This function depends on being called with all edges from the
794  * active list in the order they appear on the list (i.e. with
795  * non-decreasing x-coordinate.) */
796 static void
797 cell_list_render_edge(struct cell_list *cells,
798  struct edge *edge,
799  int sign)
800 {
801  grid_scaled_x_t fx;
802  struct cell *cell;
803  int ix;
804 
805  GRID_X_TO_INT_FRAC(edge->x.quo, ix, fx);
806 
807  /* We always know that ix1 is >= the cell list cursor in this
808  * case due to the no-intersections precondition. */
809  cell = cell_list_find(cells, ix);
810  cell->covered_height += sign*GRID_Y;
811  cell->uncovered_area += sign*2*fx*GRID_Y;
812 }
813 
814 static void
815 polygon_init (struct polygon *polygon, jmp_buf *jmp)
816 {
817  polygon->ymin = polygon->ymax = 0;
818  polygon->y_buckets = polygon->y_buckets_embedded;
819  pool_init (polygon->edge_pool.base, jmp,
820  8192 - sizeof (struct _pool_chunk),
821  sizeof (polygon->edge_pool.embedded));
822 }
823 
824 static void
825 polygon_fini (struct polygon *polygon)
826 {
827  if (polygon->y_buckets != polygon->y_buckets_embedded)
828  free (polygon->y_buckets);
829 
830  pool_fini (polygon->edge_pool.base);
831 }
832 
833 /* Empties the polygon of all edges. The polygon is then prepared to
834  * receive new edges and clip them to the vertical range
835  * [ymin,ymax). */
836 static glitter_status_t
837 polygon_reset (struct polygon *polygon,
838  grid_scaled_y_t ymin,
839  grid_scaled_y_t ymax)
840 {
841  unsigned h = ymax - ymin;
842  unsigned num_buckets = EDGE_Y_BUCKET_INDEX(ymax + GRID_Y-1, ymin);
843 
844  pool_reset (polygon->edge_pool.base);
845 
846  if (unlikely (h > 0x7FFFFFFFU - GRID_Y))
847  goto bail_no_mem; /* even if you could, you wouldn't want to. */
848 
851 
852  polygon->y_buckets = polygon->y_buckets_embedded;
853  if (num_buckets > ARRAY_LENGTH (polygon->y_buckets_embedded)) {
854  polygon->y_buckets = _cairo_malloc_ab (num_buckets,
855  sizeof (struct edge *));
856  if (unlikely (NULL == polygon->y_buckets))
857  goto bail_no_mem;
858  }
859  memset (polygon->y_buckets, 0, num_buckets * sizeof (struct edge *));
860 
861  polygon->ymin = ymin;
862  polygon->ymax = ymax;
863  return GLITTER_STATUS_SUCCESS;
864 
865 bail_no_mem:
866  polygon->ymin = 0;
867  polygon->ymax = 0;
868  return GLITTER_STATUS_NO_MEMORY;
869 }
870 
871 static void
872 _polygon_insert_edge_into_its_y_bucket(struct polygon *polygon,
873  struct edge *e)
874 {
875  unsigned ix = EDGE_Y_BUCKET_INDEX(e->ytop, polygon->ymin);
876  struct edge **ptail = &polygon->y_buckets[ix];
877  e->next = *ptail;
878  *ptail = e;
879 }
880 
881 inline static void
882 polygon_add_edge (struct polygon *polygon,
883  const cairo_edge_t *edge)
884 {
885  struct edge *e;
886  grid_scaled_x_t dx;
887  grid_scaled_y_t dy;
888  grid_scaled_y_t ytop, ybot;
889  grid_scaled_y_t ymin = polygon->ymin;
890  grid_scaled_y_t ymax = polygon->ymax;
891 
892  if (unlikely (edge->top >= ymax || edge->bottom <= ymin))
893  return;
894 
895  e = pool_alloc (polygon->edge_pool.base, sizeof (struct edge));
896 
897  dx = edge->line.p2.x - edge->line.p1.x;
898  dy = edge->line.p2.y - edge->line.p1.y;
899  e->dy = dy;
900  e->dir = edge->dir;
901 
902  ytop = edge->top >= ymin ? edge->top : ymin;
903  ybot = edge->bottom <= ymax ? edge->bottom : ymax;
904  e->ytop = ytop;
905  e->height_left = ybot - ytop;
906 
907  if (dx == 0) {
908  e->vertical = TRUE;
909  e->x.quo = edge->line.p1.x;
910  e->x.rem = 0;
911  e->dxdy.quo = 0;
912  e->dxdy.rem = 0;
913  } else {
914  e->vertical = FALSE;
915  e->dxdy = floored_divrem (dx, dy);
916  if (ytop == edge->line.p1.y) {
917  e->x.quo = edge->line.p1.x;
918  e->x.rem = 0;
919  } else {
920  e->x = floored_muldivrem (ytop - edge->line.p1.y, dx, dy);
921  e->x.quo += edge->line.p1.x;
922  }
923  }
924 
925  _polygon_insert_edge_into_its_y_bucket (polygon, e);
926 
927  e->x.rem -= dy; /* Bias the remainder for faster
928  * edge advancement. */
929 }
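/* With that bias in place, advancing an edge by one subsample row in
 * sub_row() needs only x.quo += dxdy.quo; x.rem += dxdy.rem;
 * if (x.rem >= 0) { ++x.quo; x.rem -= dy; } -- a sign test on the
 * remainder instead of a comparison against dy on every step. */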
930 
931 static void
932 active_list_reset (struct active_list *active)
933 {
934  active->head.vertical = 1;
935  active->head.height_left = INT_MAX;
936  active->head.x.quo = INT_MIN;
937  active->head.prev = NULL;
938  active->head.next = &active->tail;
939  active->tail.prev = &active->head;
940  active->tail.next = NULL;
941  active->tail.x.quo = INT_MAX;
942  active->tail.height_left = INT_MAX;
943  active->tail.vertical = 1;
944  active->min_height = 0;
945  active->is_vertical = 1;
946 }
947 
948 static void
949 active_list_init(struct active_list *active)
950 {
951  active_list_reset(active);
952 }
953 
954 /*
955  * Merge two sorted edge lists.
956  * Input:
957  * - head_a: The head of the first list.
958  * - head_b: The head of the second list; head_b cannot be NULL.
959  * Output:
960  * Returns the head of the merged list.
961  *
962  * Implementation notes:
963  * To make it fast (in particular, to reduce to an insertion sort whenever
964  * one of the two input lists only has a single element) we iterate through
965  * a list until its head becomes greater than the head of the other list,
966  * then we switch their roles. As soon as one of the two lists is empty, we
967  * just attach the other one to the current list and exit.
968  * Writes to memory are only needed to "switch" lists (as it also requires
969  * attaching to the output list the list which we will be iterating next) and
970  * to attach the last non-empty list.
971  */
972 static struct edge *
973 merge_sorted_edges (struct edge *head_a, struct edge *head_b)
974 {
975  struct edge *head, **next, *prev;
976  int32_t x;
977 
978  prev = head_a->prev;
979  next = &head;
980  if (head_a->x.quo <= head_b->x.quo) {
981  head = head_a;
982  } else {
983  head = head_b;
984  head_b->prev = prev;
985  goto start_with_b;
986  }
987 
988  do {
989  x = head_b->x.quo;
990  while (head_a != NULL && head_a->x.quo <= x) {
991  prev = head_a;
992  next = &head_a->next;
993  head_a = head_a->next;
994  }
995 
996  head_b->prev = prev;
997  *next = head_b;
998  if (head_a == NULL)
999  return head;
1000 
1001 start_with_b:
1002  x = head_a->x.quo;
1003  while (head_b != NULL && head_b->x.quo <= x) {
1004  prev = head_b;
1005  next = &head_b->next;
1006  head_b = head_b->next;
1007  }
1008 
1009  head_a->prev = prev;
1010  *next = head_a;
1011  if (head_b == NULL)
1012  return head;
1013  } while (1);
1014 }
1015 
1016 /*
1017  * Sort (part of) a list.
1018  * Input:
1019  * - list: The list to be sorted; list cannot be NULL.
1020  * - limit: Recursion limit.
1021  * Output:
1022  * - head_out: The head of the sorted list containing the first 2^(level+1) elements of the
1023  * input list; if the input list has fewer elements, head_out will be a sorted list
1024  * containing all the elements of the input list.
1025  * Returns the head of the list of unprocessed elements (NULL if the sorted list contains
1026  * all the elements of the input list).
1027  *
1028  * Implementation notes:
1029  * Special case single element list, unroll/inline the sorting of the first two elements.
1030  * Some tail recursion is used since we iterate on the bottom-up solution of the problem
1031  * (we start with a small sorted list and keep merging other lists of the same size to it).
1032  */
1033 static struct edge *
1034 sort_edges (struct edge *list,
1035  unsigned int level,
1036  struct edge **head_out)
1037 {
1038  struct edge *head_other, *remaining;
1039  unsigned int i;
1040 
1041  head_other = list->next;
1042 
1043  if (head_other == NULL) {
1044  *head_out = list;
1045  return NULL;
1046  }
1047 
1048  remaining = head_other->next;
1049  if (list->x.quo <= head_other->x.quo) {
1050  *head_out = list;
1051  head_other->next = NULL;
1052  } else {
1053  *head_out = head_other;
1054  head_other->prev = list->prev;
1055  head_other->next = list;
1056  list->prev = head_other;
1057  list->next = NULL;
1058  }
1059 
1060  for (i = 0; i < level && remaining; i++) {
1061  remaining = sort_edges (remaining, i, &head_other);
1062  *head_out = merge_sorted_edges (*head_out, head_other);
1063  }
1064 
1065  return remaining;
1066 }
1067 
1068 static struct edge *
1069 merge_unsorted_edges (struct edge *head, struct edge *unsorted)
1070 {
1071  sort_edges (unsorted, UINT_MAX, &unsorted);
1072  return merge_sorted_edges (head, unsorted);
1073 }
1074 
1075 /* Test if the edges on the active list can be safely advanced by a
1076  * full row without intersections or any edges ending. */
1077 inline static int
1078 can_do_full_row (struct active_list *active)
1079 {
1080  const struct edge *e;
1081 
1082  /* Recomputes the minimum height of all edges on the active
1083  * list if we have been dropping edges. */
1084  if (active->min_height <= 0) {
1085  int min_height = INT_MAX;
1086  int is_vertical = 1;
1087 
1088  e = active->head.next;
1089  while (NULL != e) {
1090  if (e->height_left < min_height)
1091  min_height = e->height_left;
1092  is_vertical &= e->vertical;
1093  e = e->next;
1094  }
1095 
1096  active->is_vertical = is_vertical;
1097  active->min_height = min_height;
1098  }
1099 
1100  if (active->min_height < GRID_Y)
1101  return 0;
1102 
1103  return active->is_vertical;
1104 }
1105 
1106 /* Merges edges on the given subpixel row from the polygon to the
1107  * active_list. */
1108 inline static void
1109 active_list_merge_edges_from_bucket(struct active_list *active,
1110  struct edge *edges)
1111 {
1112  active->head.next = merge_unsorted_edges (active->head.next, edges);
1113 }
1114 
1115 inline static void
1116 polygon_fill_buckets (struct active_list *active,
1117  struct edge *edge,
1118  int y,
1119  struct edge **buckets)
1120 {
1121  grid_scaled_y_t min_height = active->min_height;
1122  int is_vertical = active->is_vertical;
1123 
1124  while (edge) {
1125  struct edge *next = edge->next;
1126  int suby = edge->ytop - y;
1127  if (buckets[suby])
1128  buckets[suby]->prev = edge;
1129  edge->next = buckets[suby];
1130  edge->prev = NULL;
1131  buckets[suby] = edge;
1132  if (edge->height_left < min_height)
1133  min_height = edge->height_left;
1134  is_vertical &= edge->vertical;
1135  edge = next;
1136  }
1137 
1138  active->is_vertical = is_vertical;
1139  active->min_height = min_height;
1140 }
1141 
1142 inline static void
1143 sub_row (struct active_list *active,
1144  struct cell_list *coverages,
1145  unsigned int mask)
1146 {
1147  struct edge *edge = active->head.next;
1148  int xstart = INT_MIN, prev_x = INT_MIN;
1149  int winding = 0;
1150 
1151  cell_list_rewind (coverages);
1152 
1153  while (&active->tail != edge) {
1154  struct edge *next = edge->next;
1155  int xend = edge->x.quo;
1156 
1157  if (--edge->height_left) {
1158  edge->x.quo += edge->dxdy.quo;
1159  edge->x.rem += edge->dxdy.rem;
1160  if (edge->x.rem >= 0) {
1161  ++edge->x.quo;
1162  edge->x.rem -= edge->dy;
1163  }
1164 
1165  if (edge->x.quo < prev_x) {
1166  struct edge *pos = edge->prev;
1167  pos->next = next;
1168  next->prev = pos;
1169  do {
1170  pos = pos->prev;
1171  } while (edge->x.quo < pos->x.quo);
1172  pos->next->prev = edge;
1173  edge->next = pos->next;
1174  edge->prev = pos;
1175  pos->next = edge;
1176  } else
1177  prev_x = edge->x.quo;
1178  } else {
1179  edge->prev->next = next;
1180  next->prev = edge->prev;
1181  }
1182 
1183  winding += edge->dir;
1184  if ((winding & mask) == 0) {
1185  if (next->x.quo != xend) {
1186  cell_list_add_subspan (coverages, xstart, xend);
1187  xstart = INT_MIN;
1188  }
1189  } else if (xstart == INT_MIN)
1190  xstart = xend;
1191 
1192  edge = next;
1193  }
1194 }
1195 
1196 inline static void dec (struct edge *e, int h)
1197 {
1198  e->height_left -= h;
1199  if (e->height_left == 0) {
1200  e->prev->next = e->next;
1201  e->next->prev = e->prev;
1202  }
1203 }
1204 
1205 static void
1206 full_row (struct active_list *active,
1207  struct cell_list *coverages,
1208  unsigned int mask)
1209 {
1210  struct edge *left = active->head.next;
1211 
1212  while (&active->tail != left) {
1213  struct edge *right;
1214  int winding;
1215 
1216  dec (left, GRID_Y);
1217 
1218  winding = left->dir;
1219  right = left->next;
1220  do {
1221  dec (right, GRID_Y);
1222 
1223  winding += right->dir;
1224  if ((winding & mask) == 0 && right->next->x.quo != right->x.quo)
1225  break;
1226 
1227  right = right->next;
1228  } while (1);
1229 
1230  cell_list_set_rewind (coverages);
1231  cell_list_render_edge (coverages, left, +1);
1232  cell_list_render_edge (coverages, right, -1);
1233 
1234  left = right->next;
1235  }
1236 }
1237 
1238 static void
1239 _glitter_scan_converter_init(glitter_scan_converter_t *converter, jmp_buf *jmp)
1240 {
1241  polygon_init(converter->polygon, jmp);
1242  active_list_init(converter->active);
1243  cell_list_init(converter->coverages, jmp);
1244  converter->xmin=0;
1245  converter->ymin=0;
1246  converter->xmax=0;
1247  converter->ymax=0;
1248 }
1249 
1250 static void
1251 _glitter_scan_converter_fini(glitter_scan_converter_t *self)
1252 {
1253  if (self->spans != self->spans_embedded)
1254  free (self->spans);
1255 
1256  polygon_fini(self->polygon);
1257  cell_list_fini(self->coverages);
1258 
1259  self->xmin=0;
1260  self->ymin=0;
1261  self->xmax=0;
1262  self->ymax=0;
1263 }
1264 
1265 static grid_scaled_t
1266 int_to_grid_scaled(int i, int scale)
1267 {
1268  /* Clamp to max/min representable scaled number. */
1269  if (i >= 0) {
1270  if (i >= INT_MAX/scale)
1271  i = INT_MAX/scale;
1272  }
1273  else {
1274  if (i <= INT_MIN/scale)
1275  i = INT_MIN/scale;
1276  }
1277  return i*scale;
1278 }
1279 
1280 #define int_to_grid_scaled_x(x) int_to_grid_scaled((x), GRID_X)
1281 #define int_to_grid_scaled_y(x) int_to_grid_scaled((x), GRID_Y)
1282 
1283 I glitter_status_t
1284 glitter_scan_converter_reset(
1285  glitter_scan_converter_t *converter,
1286  int xmin, int ymin,
1287  int xmax, int ymax)
1288 {
1289  glitter_status_t status;
1290 
1291  converter->xmin = 0; converter->xmax = 0;
1292  converter->ymin = 0; converter->ymax = 0;
1293 
1294  if (xmax - xmin > ARRAY_LENGTH(converter->spans_embedded)) {
1295  converter->spans = _cairo_malloc_ab (xmax - xmin,
1296  sizeof (cairo_half_open_span_t));
1297  if (unlikely (converter->spans == NULL))
1298  return _cairo_error (CAIRO_STATUS_NO_MEMORY);
1299  } else
1300  converter->spans = converter->spans_embedded;
1301 
1302  xmin = int_to_grid_scaled_x(xmin);
1303  ymin = int_to_grid_scaled_y(ymin);
1304  xmax = int_to_grid_scaled_x(xmax);
1305  ymax = int_to_grid_scaled_y(ymax);
1306 
1307  active_list_reset(converter->active);
1308  cell_list_reset(converter->coverages);
1309  status = polygon_reset(converter->polygon, ymin, ymax);
1310  if (status)
1311  return status;
1312 
1313  converter->xmin = xmin;
1314  converter->xmax = xmax;
1315  converter->ymin = ymin;
1316  converter->ymax = ymax;
1317  return GLITTER_STATUS_SUCCESS;
1318 }
1319 
1320 /* INPUT_TO_GRID_X/Y (in_coord, out_grid_scaled, grid_scale)
1321  * These macros convert an input coordinate in the client's
1322  * device space to the rasterisation grid.
1323  */
1324 /* Gah.. this bit of ugly defines INPUT_TO_GRID_X/Y so as to use
1325  * shifts if possible, and something saneish if not.
1326  */
1327 #if !defined(INPUT_TO_GRID_Y) && defined(GRID_Y_BITS) && GRID_Y_BITS <= GLITTER_INPUT_BITS
1328 # define INPUT_TO_GRID_Y(in, out) (out) = (in) >> (GLITTER_INPUT_BITS - GRID_Y_BITS)
1329 #else
1330 # define INPUT_TO_GRID_Y(in, out) INPUT_TO_GRID_general(in, out, GRID_Y)
1331 #endif
1332 
1333 #if !defined(INPUT_TO_GRID_X) && defined(GRID_X_BITS) && GRID_X_BITS <= GLITTER_INPUT_BITS
1334 # define INPUT_TO_GRID_X(in, out) (out) = (in) >> (GLITTER_INPUT_BITS - GRID_X_BITS)
1335 #else
1336 # define INPUT_TO_GRID_X(in, out) INPUT_TO_GRID_general(in, out, GRID_X)
1337 #endif
1338 
1339 #define INPUT_TO_GRID_general(in, out, grid_scale) do { \
1340  long long tmp__ = (long long)(grid_scale) * (in); \
1341  tmp__ >>= GLITTER_INPUT_BITS; \
1342  (out) = tmp__; \
1343 } while (0)
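/* E.g. with GLITTER_INPUT_BITS == 8 (cairo's usual
 * CAIRO_FIXED_FRAC_BITS) and GRID_X_BITS == 2, the input coordinate
 * 0x180 (1.5 pixels) becomes 0x180 >> 6 == 6 grid units, i.e.
 * pixel 1, subpixel 2 of 4. */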
1344 
1345 /* Add a new polygon edge from pixel (x1,y1) to (x2,y2) to the scan
1346  * converter. The coordinates represent pixel positions scaled by
1347  * 2**GLITTER_PIXEL_BITS. If this function fails then the scan
1348  * converter should be reset or destroyed. Dir must be +1 or -1,
1349  * with the latter reversing the orientation of the edge. */
1350 I void
1351 glitter_scan_converter_add_edge (glitter_scan_converter_t *converter,
1352  const cairo_edge_t *edge)
1353 {
1354  cairo_edge_t e;
1355 
1356  INPUT_TO_GRID_Y (edge->top, e.top);
1357  INPUT_TO_GRID_Y (edge->bottom, e.bottom);
1358  if (e.top >= e.bottom)
1359  return;
1360 
1361  /* XXX: possible overflows if GRID_X/Y > 2**GLITTER_INPUT_BITS */
1362  INPUT_TO_GRID_Y (edge->line.p1.y, e.line.p1.y);
1363  INPUT_TO_GRID_Y (edge->line.p2.y, e.line.p2.y);
1364  if (e.line.p1.y == e.line.p2.y)
1365  e.line.p2.y++; /* Fudge to prevent div-by-zero */
1366 
1367  INPUT_TO_GRID_X (edge->line.p1.x, e.line.p1.x);
1368  INPUT_TO_GRID_X (edge->line.p2.x, e.line.p2.x);
1369 
1370  e.dir = edge->dir;
1371 
1372  polygon_add_edge (converter->polygon, &e);
1373 }
1374 
1375 static void
1376 step_edges (struct active_list *active, int count)
1377 {
1378  struct edge *edge;
1379 
1380  count *= GRID_Y;
1381  for (edge = active->head.next; edge != &active->tail; edge = edge->next) {
1382  edge->height_left -= count;
1383  if (! edge->height_left) {
1384  edge->prev->next = edge->next;
1385  edge->next->prev = edge->prev;
1386  }
1387  }
1388 }
1389 
1390 static glitter_status_t
1391 blit_a8 (struct cell_list *cells,
1392  cairo_span_renderer_t *renderer,
1393  cairo_half_open_span_t *spans,
1394  int y, int height,
1395  int xmin, int xmax)
1396 {
1397  struct cell *cell = cells->head.next;
1398  int prev_x = xmin, last_x = -1;
1399  int16_t cover = 0, last_cover = 0;
1400  unsigned num_spans;
1401 
1402  if (cell == &cells->tail)
1403  return CAIRO_STATUS_SUCCESS;
1404 
1405  /* Skip cells to the left of the clip region. */
1406  while (cell->x < xmin) {
1407  cover += cell->covered_height;
1408  cell = cell->next;
1409  }
1410  cover *= GRID_X*2;
1411 
1412  /* Form the spans from the coverages and areas. */
1413  num_spans = 0;
1414  for (; cell->x < xmax; cell = cell->next) {
1415  int x = cell->x;
1416  int16_t area;
1417 
1418  if (x > prev_x && cover != last_cover) {
1419  spans[num_spans].x = prev_x;
1420  spans[num_spans].coverage = GRID_AREA_TO_ALPHA (cover);
1421  last_cover = cover;
1422  last_x = prev_x;
1423  ++num_spans;
1424  }
1425 
1426  cover += cell->covered_height*GRID_X*2;
1427  area = cover - cell->uncovered_area;
1428 
1429  if (area != last_cover) {
1430  spans[num_spans].x = x;
1431  spans[num_spans].coverage = GRID_AREA_TO_ALPHA (area);
1432  last_cover = area;
1433  last_x = x;
1434  ++num_spans;
1435  }
1436 
1437  prev_x = x+1;
1438  }
1439 
1440  if (prev_x <= xmax && cover != last_cover) {
1441  spans[num_spans].x = prev_x;
1442  spans[num_spans].coverage = GRID_AREA_TO_ALPHA (cover);
1443  last_cover = cover;
1444  last_x = prev_x;
1445  ++num_spans;
1446  }
1447 
1448  if (last_x < xmax && last_cover) {
1449  spans[num_spans].x = xmax;
1450  spans[num_spans].coverage = 0;
1451  ++num_spans;
1452  }
1453 
1454  /* Dump them into the renderer. */
1455  return renderer->render_rows (renderer, y, height, spans, num_spans);
1456 }
1457 
1458 #define GRID_AREA_TO_A1(A) ((GRID_AREA_TO_ALPHA (A) > 127) ? 255 : 0)
1459 static glitter_status_t
1460 blit_a1 (struct cell_list *cells,
1461  cairo_span_renderer_t *renderer,
1462  cairo_half_open_span_t *spans,
1463  int y, int height,
1464  int xmin, int xmax)
1465 {
1466  struct cell *cell = cells->head.next;
1467  int prev_x = xmin, last_x = -1;
1468  int16_t cover = 0;
1469  uint8_t coverage, last_cover = 0;
1470  unsigned num_spans;
1471 
1472  if (cell == &cells->tail)
1473  return CAIRO_STATUS_SUCCESS;
1474 
1475  /* Skip cells to the left of the clip region. */
1476  while (cell->x < xmin) {
1477  cover += cell->covered_height;
1478  cell = cell->next;
1479  }
1480  cover *= GRID_X*2;
1481 
1482  /* Form the spans from the coverages and areas. */
1483  num_spans = 0;
1484  for (; cell->x < xmax; cell = cell->next) {
1485  int x = cell->x;
1486  int16_t area;
1487 
1488  coverage = GRID_AREA_TO_A1 (cover);
1489  if (x > prev_x && coverage != last_cover) {
1490  last_x = spans[num_spans].x = prev_x;
1491  last_cover = spans[num_spans].coverage = coverage;
1492  ++num_spans;
1493  }
1494 
1495  cover += cell->covered_height*GRID_X*2;
1496  area = cover - cell->uncovered_area;
1497 
1498  coverage = GRID_AREA_TO_A1 (area);
1499  if (coverage != last_cover) {
1500  last_x = spans[num_spans].x = x;
1501  last_cover = spans[num_spans].coverage = coverage;
1502  ++num_spans;
1503  }
1504 
1505  prev_x = x+1;
1506  }
1507 
1508  coverage = GRID_AREA_TO_A1 (cover);
1509  if (prev_x <= xmax && coverage != last_cover) {
1510  last_x = spans[num_spans].x = prev_x;
1511  last_cover = spans[num_spans].coverage = coverage;
1512  ++num_spans;
1513  }
1514 
1515  if (last_x < xmax && last_cover) {
1516  spans[num_spans].x = xmax;
1517  spans[num_spans].coverage = 0;
1518  ++num_spans;
1519  }
1520  if (num_spans == 1)
1521  return CAIRO_STATUS_SUCCESS;
1522 
1523  /* Dump them into the renderer. */
1524  return renderer->render_rows (renderer, y, height, spans, num_spans);
1525 }
1526 
1527 
1528 I void
1529 glitter_scan_converter_render(glitter_scan_converter_t *converter,
1530  unsigned int winding_mask,
1531  int antialias,
1532  cairo_span_renderer_t *renderer)
1533 {
1534  int i, j;
1535  int ymax_i = converter->ymax / GRID_Y;
1536  int ymin_i = converter->ymin / GRID_Y;
1537  int xmin_i, xmax_i;
1538  int h = ymax_i - ymin_i;
1539  struct polygon *polygon = converter->polygon;
1540  struct cell_list *coverages = converter->coverages;
1541  struct active_list *active = converter->active;
1542  struct edge *buckets[GRID_Y] = { 0 };
1543 
1544  xmin_i = converter->xmin / GRID_X;
1545  xmax_i = converter->xmax / GRID_X;
1546  if (xmin_i >= xmax_i)
1547  return;
1548 
1549  /* Render each pixel row. */
1550  for (i = 0; i < h; i = j) {
1551  int do_full_row = 0;
1552 
1553  j = i + 1;
1554 
1555  /* Determine if we can ignore this row or use the full pixel
1556  * stepper. */
1557  if (! polygon->y_buckets[i]) {
1558  if (active->head.next == &active->tail) {
1559  active->min_height = INT_MAX;
1560  active->is_vertical = 1;
1561  for (; j < h && ! polygon->y_buckets[j]; j++)
1562  ;
1563  continue;
1564  }
1565 
1566  do_full_row = can_do_full_row (active);
1567  }
1568 
1569  if (do_full_row) {
1570  /* Step by a full pixel row's worth. */
1571  full_row (active, coverages, winding_mask);
1572 
1573  if (active->is_vertical) {
1574  while (j < h &&
1575  polygon->y_buckets[j] == NULL &&
1576  active->min_height >= 2*GRID_Y)
1577  {
1578  active->min_height -= GRID_Y;
1579  j++;
1580  }
1581  if (j != i + 1)
1582  step_edges (active, j - (i + 1));
1583  }
1584  } else {
1585  int sub;
1586 
1587  polygon_fill_buckets (active,
1588  polygon->y_buckets[i],
1589  (i+ymin_i)*GRID_Y,
1590  buckets);
1591 
1592  /* Subsample this row. */
1593  for (sub = 0; sub < GRID_Y; sub++) {
1594  if (buckets[sub]) {
1595  active_list_merge_edges_from_bucket (active, buckets[sub]);
1596  buckets[sub] = NULL;
1597  }
1598 
1599  sub_row (active, coverages, winding_mask);
1600  }
1601  }
1602 
1603  if (antialias)
1604  blit_a8 (coverages, renderer, converter->spans,
1605  i+ymin_i, j-i, xmin_i, xmax_i);
1606  else
1607  blit_a1 (coverages, renderer, converter->spans,
1608  i+ymin_i, j-i, xmin_i, xmax_i);
1609  cell_list_reset (coverages);
1610 
1611  active->min_height -= GRID_Y;
1612  }
1613 }
1614 
1615 struct _cairo_tor22_scan_converter {
1616  cairo_scan_converter_t base;
1617 
1618  glitter_scan_converter_t converter[1];
1619  cairo_fill_rule_t fill_rule;
1620  cairo_antialias_t antialias;
1621 
1622  jmp_buf jmp;
1623 };
1624 
1625 typedef struct _cairo_tor22_scan_converter cairo_tor22_scan_converter_t;
1626 
1627 static void
1628 _cairo_tor22_scan_converter_destroy (void *converter)
1629 {
1630  cairo_tor22_scan_converter_t *self = converter;
1631  if (self == NULL) {
1632  return;
1633  }
1634  _glitter_scan_converter_fini (self->converter);
1635  free(self);
1636 }
1637 
1638 cairo_status_t
1639 _cairo_tor22_scan_converter_add_polygon (void *converter,
1640  const cairo_polygon_t *polygon)
1641 {
1642  cairo_tor22_scan_converter_t *self = converter;
1643  int i;
1644 
1645 #if 0
1646  FILE *file = fopen ("polygon.txt", "w");
1647  _cairo_debug_print_polygon (file, polygon);
1648  fclose (file);
1649 #endif
1650 
1651  for (i = 0; i < polygon->num_edges; i++)
1652  glitter_scan_converter_add_edge (self->converter, &polygon->edges[i]);
1653 
1654  return CAIRO_STATUS_SUCCESS;
1655 }
1656 
1657 static cairo_status_t
1658 _cairo_tor22_scan_converter_generate (void *converter,
1659  cairo_span_renderer_t *renderer)
1660 {
1661  cairo_tor22_scan_converter_t *self = converter;
1662  cairo_status_t status;
1663 
1664  if ((status = setjmp (self->jmp)))
1665  return _cairo_scan_converter_set_error (self, _cairo_error (status));
1666 
1667  glitter_scan_converter_render (self->converter,
1668  self->fill_rule == CAIRO_FILL_RULE_WINDING ? ~0 : 1,
1669  self->antialias != CAIRO_ANTIALIAS_NONE,
1670  renderer);
1671  return CAIRO_STATUS_SUCCESS;
1672 }
1673 
1674 cairo_scan_converter_t *
1675 _cairo_tor22_scan_converter_create (int xmin,
1676  int ymin,
1677  int xmax,
1678  int ymax,
1679  cairo_fill_rule_t fill_rule,
1680  cairo_antialias_t antialias)
1681 {
1682  cairo_tor22_scan_converter_t *self;
1683  cairo_status_t status;
1684 
1685  self = _cairo_malloc (sizeof(struct _cairo_tor22_scan_converter));
1686  if (unlikely (self == NULL)) {
1687  status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
1688  goto bail_nomem;
1689  }
1690 
1691  self->base.destroy = _cairo_tor22_scan_converter_destroy;
1692  self->base.generate = _cairo_tor22_scan_converter_generate;
1693 
1694  _glitter_scan_converter_init (self->converter, &self->jmp);
1695  status = glitter_scan_converter_reset (self->converter,
1696  xmin, ymin, xmax, ymax);
1697  if (unlikely (status))
1698  goto bail;
1699 
1700  self->fill_rule = fill_rule;
1701  self->antialias = antialias;
1702 
1703  return &self->base;
1704 
1705  bail:
1706  self->base.destroy(&self->base);
1707  bail_nomem:
1708  return _cairo_scan_converter_create_in_error (status);
1709 }