{   1,   3,   1,   3,   1,   3,   1,   3, },
{   2,   0,   2,   0,   2,   0,   2,   0, },

{   6,   2,   6,   2,   6,   2,   6,   2, },
{   0,   4,   0,   4,   0,   4,   0,   4, },

{   8,   4,  11,   7,   8,   4,  11,   7, },
{   2,  14,   1,  13,   2,  14,   1,  13, },
{  10,   6,   9,   5,  10,   6,   9,   5, },
{   0,  12,   3,  15,   0,  12,   3,  15, },

{  17,   9,  23,  15,  16,   8,  22,  14, },
{   5,  29,   3,  27,   4,  28,   2,  26, },
{  21,  13,  19,  11,  20,  12,  18,  10, },
{   0,  24,   6,  30,   1,  25,   7,  31, },
{  16,   8,  22,  14,  17,   9,  23,  15, },
{   4,  28,   2,  26,   5,  29,   3,  27, },
{  20,  12,  18,  10,  21,  13,  19,  11, },
{   1,  25,   7,  31,   0,  24,   6,  30, },

{   0,  55,  14,  68,   3,  58,  17,  72, },
{  37,  18,  50,  32,  40,  22,  54,  35, },
{   9,  64,   5,  59,  13,  67,   8,  63, },
{  46,  27,  41,  23,  49,  31,  44,  26, },
{   2,  57,  16,  71,   1,  56,  15,  70, },
{  39,  21,  52,  34,  38,  19,  51,  33, },
{  11,  66,   7,  62,  10,  65,   6,  60, },
{  48,  30,  43,  25,  47,  29,  42,  24, },

{ 117,  62, 158, 103, 113,  58, 155, 100, },
{  34, 199,  21, 186,  31, 196,  17, 182, },
{ 144,  89, 131,  76, 141,  86, 127,  72, },
{   0, 165,  41, 206,  10, 175,  52, 217, },
{ 110,  55, 151,  96, 120,  65, 162, 107, },
{  28, 193,  14, 179,  38, 203,  24, 189, },
{ 138,  83, 124,  69, 148,  93, 134,  79, },
{   7, 172,  48, 213,   3, 168,  45, 210, },

{   0, 143,  18, 200,   2, 156,  25, 215, },
{  78,  28, 125,  64,  89,  36, 138,  74, },
{  10, 180,   3, 161,  16, 195,   8, 175, },
{ 109,  51,  93,  38, 121,  60, 105,  47, },
{   1, 152,  23, 210,   0, 147,  20, 205, },
{  85,  33, 134,  71,  81,  30, 130,  67, },
{  14, 190,   6, 171,  12, 185,   5, 166, },
{ 117,  57, 101,  44, 113,  54,  97,  41, },

{   0, 124,   8, 193,   0, 140,  12, 213, },
{  55,  14, 104,  42,  66,  19, 119,  52, },
{   3, 168,   1, 145,   6, 187,   3, 162, },
{  86,  31,  70,  21,  99,  39,  82,  28, },
{   0, 134,  11, 206,   0, 129,   9, 200, },
{  62,  17, 114,  48,  58,  16, 109,  45, },
{   5, 181,   2, 157,   4, 175,   1, 151, },
{  95,  36,  78,  26,  90,  34,  74,  24, },

{   0, 107,   3, 187,   0, 125,   6, 212, },
{  39,   7,  86,  28,  49,  11, 102,  36, },
{   1, 158,   0, 131,   3, 180,   1, 151, },
{  68,  19,  52,  12,  81,  25,  64,  17, },
{   0, 119,   5, 203,   0, 113,   4, 195, },
{  45,   9,  96,  33,  42,   8,  91,  30, },
{   2, 172,   1, 144,   2, 165,   0, 137, },
{  77,  23,  60,  15,  72,  21,  56,  14, },
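/* Endianness-aware store used by the 16-bit planar writers: clip the shifted
 * value to the signed/unsigned 16-bit range, add the bias and write it with
 * AV_WB16() or AV_WL16() depending on big_endian. */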
#define output_pixel(pos, val, bias, signedness) \
    if (big_endian) { \
        AV_WB16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
    } else { \
        AV_WL16(pos, bias + av_clip_ ## signedness ## 16(val >> shift)); \
    }
                         int big_endian, int output_bits)

    int shift = 19 - output_bits;

    for (i = 0; i < dstW; i++) {
        int val = src[i] + (1 << (shift - 1));
                         int big_endian, int output_bits)

    int shift = 15 + 16 - output_bits;

    for (i = 0; i < dstW; i++) {
        int val = 1 << (30-output_bits);

        for (j = 0; j < filterSize; j++)
            val += src[j][i] * filter[j];
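/* Same store for the 9/10-bit planar writers: av_clip_uintp2() keeps only
 * output_bits significant bits before the endianness-dependent 16-bit write. */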
#define output_pixel(pos, val) \
    if (big_endian) { \
        AV_WB16(pos, av_clip_uintp2(val >> shift, output_bits)); \
    } else { \
        AV_WL16(pos, av_clip_uintp2(val >> shift, output_bits)); \
    }
                         int big_endian, int output_bits)

    int shift = 15 - output_bits;

    for (i = 0; i < dstW; i++) {
        int val = src[i] + (1 << (shift - 1));
                         const int16_t **src, uint16_t *dest, int dstW,
                         int big_endian, int output_bits)

    int shift = 11 + 16 - output_bits;

    for (i = 0; i < dstW; i++) {
        int val = 1 << (26-output_bits);

        for (j = 0; j < filterSize; j++)
            val += src[j][i] * filter[j];
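/* yuv2NBPS() stamps out little- and big-endian yuv2plane1/yuv2planeX wrappers
 * for one bit depth on top of the templates above; the instantiations below
 * cover the 10-bit and 16-bit cases. */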
#define yuv2NBPS(bits, BE_LE, is_be, template_size, typeX_t) \
static void yuv2plane1_ ## bits ## BE_LE ## _c(const int16_t *src, \
                              uint8_t *dest, int dstW, \
                              const uint8_t *dither, int offset)\
{ \
    yuv2plane1_ ## template_size ## _c_template((const typeX_t *) src, \
                         (uint16_t *) dest, dstW, is_be, bits); \
}\
static void yuv2planeX_ ## bits ## BE_LE ## _c(const int16_t *filter, int filterSize, \
                              const int16_t **src, uint8_t *dest, int dstW, \
                              const uint8_t *dither, int offset)\
{ \
    yuv2planeX_## template_size ## _c_template(filter, \
                         filterSize, (const typeX_t **) src, \
                         (uint16_t *) dest, dstW, is_be, bits); \
}

yuv2NBPS(10, BE, 1, 10, int16_t)
yuv2NBPS(10, LE, 0, 10, int16_t)
yuv2NBPS(16, BE, 1, 16, int32_t)
yuv2NBPS(16, LE, 0, 16, int32_t)
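/* Plain 8-bit planar output: seed the accumulator with the ordered-dither
 * value for this column, add the filter taps and clip the >>19 result to
 * 8 bits. */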
static void yuv2planeX_8_c(const int16_t *filter, int filterSize,

    for (i=0; i<dstW; i++) {
        int val = dither[(i + offset) & 7] << 12;

        for (j=0; j<filterSize; j++)
            val += src[j][i] * filter[j];

        dest[i]= av_clip_uint8(val>>19);
    for (i=0; i<dstW; i++) {
        int val = (src[i] + dither[(i + offset) & 7]) >> 7;
        dest[i]= av_clip_uint8(val);
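/* Interleaved chroma (NV12/NV21) output: U and V go through the same vertical
 * filter; only the store order of the two samples in each pair differs
 * between the two layouts. */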
                        const int16_t **chrUSrc, const int16_t **chrVSrc,

        int u = chrDither[i & 7] << 12;
        int v = chrDither[(i + 3) & 7] << 12;

        for (j=0; j<chrFilterSize; j++) {
            u += chrUSrc[j][i] * chrFilter[j];
            v += chrVSrc[j][i] * chrFilter[j];

        dest[2*i]= av_clip_uint8(u>>19);
        dest[2*i+1]= av_clip_uint8(v>>19);

        int u = chrDither[i & 7] << 12;
        int v = chrDither[(i + 3) & 7] << 12;

        for (j=0; j<chrFilterSize; j++) {
            u += chrUSrc[j][i] * chrFilter[j];
            v += chrVSrc[j][i] * chrFilter[j];

        dest[2*i]= av_clip_uint8(v>>19);
        dest[2*i+1]= av_clip_uint8(u>>19);
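/* 1 bpp output: accumulate_bit() shifts the next thresholded luma sample into
 * the bit accumulator, and output_pixel() stores the accumulated bits directly
 * for MONOBLACK and inverted for MONOWHITE. */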
#define accumulate_bit(acc, val) \
    acc <<= 1; \
    acc |= (val) >= (128 + 110)

#define output_pixel(pos, acc) \
    if (target == AV_PIX_FMT_MONOBLACK) { \
                               const int16_t **lumSrc, int lumFilterSize,
                               const int16_t *chrFilter, const int16_t **chrUSrc,
                               const int16_t **chrVSrc, int chrFilterSize,

    for (i = 0; i < dstW; i += 2) {

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i] * lumFilter[j];
            Y2 += lumSrc[j][i+1] * lumFilter[j];

        if ((Y1 | Y2) & 0x100) {
            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
                                 const int16_t *ubuf[2], const int16_t *vbuf[2],
                                 int yalpha, int uvalpha, int y,

    const int16_t *buf0 = buf[0], *buf1 = buf[1];
    int yalpha1 = 4096 - yalpha;

    for (i = 0; i < dstW; i += 8) {
        Y = (buf0[i + 0] * yalpha1 + buf1[i + 0] * yalpha) >> 19;
        Y = (buf0[i + 1] * yalpha1 + buf1[i + 1] * yalpha) >> 19;
        Y = (buf0[i + 2] * yalpha1 + buf1[i + 2] * yalpha) >> 19;
        Y = (buf0[i + 3] * yalpha1 + buf1[i + 3] * yalpha) >> 19;
        Y = (buf0[i + 4] * yalpha1 + buf1[i + 4] * yalpha) >> 19;
        Y = (buf0[i + 5] * yalpha1 + buf1[i + 5] * yalpha) >> 19;
        Y = (buf0[i + 6] * yalpha1 + buf1[i + 6] * yalpha) >> 19;
        Y = (buf0[i + 7] * yalpha1 + buf1[i + 7] * yalpha) >> 19;
                                 const int16_t *ubuf[2], const int16_t *vbuf[2],

    for (i = 0; i < dstW; i += 8) {
#undef accumulate_bit
#define YUV2PACKEDWRAPPER(name, base, ext, fmt) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                                const int16_t **lumSrc, int lumFilterSize, \
                                const int16_t *chrFilter, const int16_t **chrUSrc, \
                                const int16_t **chrVSrc, int chrFilterSize, \
                                const int16_t **alpSrc, uint8_t *dest, int dstW, \
                                int y) \
{ \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                                  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                  alpSrc, dest, dstW, y, fmt); \
} \
 \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                int yalpha, int uvalpha, int y) \
{ \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                                  dest, dstW, yalpha, uvalpha, y, fmt); \
} \
 \
static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf0, uint8_t *dest, int dstW, \
                                int uvalpha, int y) \
{ \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, \
                                  abuf0, dest, dstW, uvalpha, \
                                  y, fmt); \
}
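/* Packed 4:2:2 output: the same Y1/U/Y2/V quad is written in YUYV or UYVY
 * byte order depending on the target format. */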
#define output_pixels(pos, Y1, U, Y2, V) \
    if (target == AV_PIX_FMT_YUYV422) { \
        dest[pos + 0] = Y1; \
        dest[pos + 1] = U;  \
        dest[pos + 2] = Y2; \
        dest[pos + 3] = V;  \
    } else { \
        dest[pos + 0] = U;  \
        dest[pos + 1] = Y1; \
        dest[pos + 2] = V;  \
        dest[pos + 3] = Y2; \
    }
                                const int16_t **lumSrc, int lumFilterSize,
                                const int16_t *chrFilter, const int16_t **chrUSrc,
                                const int16_t **chrVSrc, int chrFilterSize,

    for (i = 0; i < ((dstW + 1) >> 1); i++) {

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i * 2] * lumFilter[j];
            Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];

        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];

        if ((Y1 | Y2 | U | V) & 0x100) {
            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
            U = av_clip_uint8(U);
            V = av_clip_uint8(V);
                                 const int16_t *ubuf[2], const int16_t *vbuf[2],
                                 int yalpha, int uvalpha, int y,

    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
    int yalpha1 = 4096 - yalpha;
    int uvalpha1 = 4096 - uvalpha;

    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
        int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
        int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
        int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;

        Y1 = av_clip_uint8(Y1);
        Y2 = av_clip_uint8(Y2);
        U = av_clip_uint8(U);
        V = av_clip_uint8(V);
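/* Single-input 4:2:2 path: with uvalpha < 2048 the single chroma line is used
 * directly (>>7), otherwise the two chroma lines are averaged (>>8). */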
                                 const int16_t *ubuf[2], const int16_t *vbuf[2],

    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];

    if (uvalpha < 2048) {
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = buf0[i * 2] >> 7;
            int Y2 = buf0[i * 2 + 1] >> 7;
            int U = ubuf0[i] >> 7;
            int V = vbuf0[i] >> 7;

            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
            U = av_clip_uint8(U);
            V = av_clip_uint8(V);

        const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = buf0[i * 2] >> 7;
            int Y2 = buf0[i * 2 + 1] >> 7;
            int U = (ubuf0[i] + ubuf1[i]) >> 8;
            int V = (vbuf0[i] + vbuf1[i]) >> 8;

            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
            U = av_clip_uint8(U);
            V = av_clip_uint8(V);
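/* 16-bit-per-component RGB output: R_B/B_R swap the red and blue lookups
 * between RGB48 and BGR48, and output_pixel() stores each component with the
 * endianness of the target format. */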
#define R_B ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE) ? R : B)
#define B_R ((target == AV_PIX_FMT_RGB48LE || target == AV_PIX_FMT_RGB48BE) ? B : R)
#define output_pixel(pos, val) \
    if (isBE(target)) { \
                                  const int32_t **lumSrc, int lumFilterSize,
                                  const int16_t *chrFilter, const int32_t **chrUSrc,
                                  const int32_t **chrVSrc, int chrFilterSize,

    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        int Y1 = -0x40000000;
        int Y2 = -0x40000000;

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i * 2] * lumFilter[j];
            Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];

        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];

        output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);

        output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
                                 int yalpha, int uvalpha, int y,

    const int32_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1];
    int yalpha1 = 4096 - yalpha;
    int uvalpha1 = 4096 - uvalpha;

    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 14;
        int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 14;
        int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha + (-128 << 23)) >> 14;
        int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha + (-128 << 23)) >> 14;

        output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);

        output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
    const int32_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];

    if (uvalpha < 2048) {
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = (buf0[i * 2]    ) >> 2;
            int Y2 = (buf0[i * 2 + 1]) >> 2;
            int U = (ubuf0[i] + (-128 << 11)) >> 2;
            int V = (vbuf0[i] + (-128 << 11)) >> 2;

            output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);

            output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);

        const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = (buf0[i * 2]    ) >> 2;
            int Y2 = (buf0[i * 2 + 1]) >> 2;
            int U = (ubuf0[i] + ubuf1[i] + (-128 << 12)) >> 3;
            int V = (vbuf0[i] + vbuf1[i] + (-128 << 12)) >> 3;

            output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14);

            output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14);
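/* Wrapper generator for the >8 bpc packed RGB outputs: it casts the int16_t
 * buffer pointers passed in by the scaler to the int32_t/uint16_t types the
 * templates above expect, then forwards to the _X/_2/_1 template. */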
#define YUV2PACKED16WRAPPER(name, base, ext, fmt) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                                const int16_t **_lumSrc, int lumFilterSize, \
                                const int16_t *chrFilter, const int16_t **_chrUSrc, \
                                const int16_t **_chrVSrc, int chrFilterSize, \
                                const int16_t **_alpSrc, uint8_t *_dest, int dstW, \
                                int y) \
{ \
    const int32_t **lumSrc = (const int32_t **) _lumSrc, \
                  **chrUSrc = (const int32_t **) _chrUSrc, \
                  **chrVSrc = (const int32_t **) _chrVSrc, \
                  **alpSrc = (const int32_t **) _alpSrc; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                                  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                  alpSrc, dest, dstW, y, fmt); \
} \
 \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *_buf[2], \
                                const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
                                const int16_t *_abuf[2], uint8_t *_dest, int dstW, \
                                int yalpha, int uvalpha, int y) \
{ \
    const int32_t **buf = (const int32_t **) _buf, \
                  **ubuf = (const int32_t **) _ubuf, \
                  **vbuf = (const int32_t **) _vbuf, \
                  **abuf = (const int32_t **) _abuf; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                                  dest, dstW, yalpha, uvalpha, y, fmt); \
} \
 \
static void name ## ext ## _1_c(SwsContext *c, const int16_t *_buf0, \
                                const int16_t *_ubuf[2], const int16_t *_vbuf[2], \
                                const int16_t *_abuf0, uint8_t *_dest, int dstW, \
                                int uvalpha, int y) \
{ \
    const int32_t *buf0 = (const int32_t *) _buf0, \
                  **ubuf = (const int32_t **) _ubuf, \
                  **vbuf = (const int32_t **) _vbuf, \
                  *abuf0 = (const int32_t *) _abuf0; \
    uint16_t *dest = (uint16_t *) _dest; \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
                                  dstW, uvalpha, y, fmt); \
}
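/* yuv2rgb_write(): table-driven packed RGB store. r, g and b point into
 * per-component lookup tables indexed by the clipped Y value (plus a dither
 * offset for the low-depth targets); summing the three entries gives the
 * packed pixel. */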
                              unsigned A1, unsigned A2,
                              const void *_r, const void *_g, const void *_b, int y,

        uint32_t *dest = (uint32_t *) _dest;
        const uint32_t *r = (const uint32_t *) _r;
        const uint32_t *g = (const uint32_t *) _g;
        const uint32_t *b = (const uint32_t *) _b;

        dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (hasAlpha ? A1 << sh : 0);
        dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (hasAlpha ? A2 << sh : 0);

            dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1] + (A1 << sh);
            dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2] + (A2 << sh);

            dest[i * 2 + 0] = r[Y1] + g[Y1] + b[Y1];
            dest[i * 2 + 1] = r[Y2] + g[Y2] + b[Y2];
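/* 24-bit RGB: three bytes per pixel, with r_b/b_r swapping the red and blue
 * lookups between RGB24 and BGR24. */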
#define r_b ((target == AV_PIX_FMT_RGB24) ? r : b)
#define b_r ((target == AV_PIX_FMT_RGB24) ? b : r)
        dest[i * 6 + 0] = r_b[Y1];
        dest[i * 6 + 1] = g[Y1];
        dest[i * 6 + 2] = b_r[Y1];
        dest[i * 6 + 3] = r_b[Y2];
        dest[i * 6 + 4] = g[Y2];
        dest[i * 6 + 5] = b_r[Y2];
        uint16_t *dest = (uint16_t *) _dest;
        const uint16_t *r = (const uint16_t *) _r;
        const uint16_t *g = (const uint16_t *) _g;
        const uint16_t *b = (const uint16_t *) _b;
        int dr1, dg1, db1, dr2, dg2, db2;

        dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
        dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
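/* Lowest-depth targets: per-pixel ordered-dither offsets (d32/d64/d128) are
 * added to the table indices before the lookup; the packed 4 bpp case stores
 * two pixels per byte. */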
        int dr1, dg1, db1, dr2, dg2, db2;

            dr1 = dg1 = d32[(i * 2 + 0) & 7];
            db1 = d64[(i * 2 + 0) & 7];
            dr2 = dg2 = d32[(i * 2 + 1) & 7];
            db2 = d64[(i * 2 + 1) & 7];

            dr1 = db1 = d128[(i * 2 + 0) & 7];
            dg1 = d64[(i * 2 + 0) & 7];
            dr2 = db2 = d128[(i * 2 + 1) & 7];
            dg2 = d64[(i * 2 + 1) & 7];

            dest[i] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1] +
                      ((r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2]) << 4);

            dest[i * 2 + 0] = r[Y1 + dr1] + g[Y1 + dg1] + b[Y1 + db1];
            dest[i * 2 + 1] = r[Y2 + dr2] + g[Y2 + dg2] + b[Y2 + db2];
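/* General multi-tap packed RGB path: run the luma and chroma filters, clip to
 * 8 bits only when a value actually overflows, optionally filter alpha the
 * same way, then let yuv2rgb_write() store the pixel pair. */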
                          const int16_t **lumSrc, int lumFilterSize,
                          const int16_t *chrFilter, const int16_t **chrUSrc,
                          const int16_t **chrVSrc, int chrFilterSize,

    for (i = 0; i < ((dstW + 1) >> 1); i++) {

        const void *r, *g, *b;

        for (j = 0; j < lumFilterSize; j++) {
            Y1 += lumSrc[j][i * 2] * lumFilter[j];
            Y2 += lumSrc[j][i * 2 + 1] * lumFilter[j];

        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];

        if ((Y1 | Y2 | U | V) & 0x100) {
            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
            U = av_clip_uint8(U);
            V = av_clip_uint8(V);

            for (j = 0; j < lumFilterSize; j++) {
                A1 += alpSrc[j][i * 2 ] * lumFilter[j];
                A2 += alpSrc[j][i * 2 + 1] * lumFilter[j];

            if ((A1 | A2) & 0x100) {
                A1 = av_clip_uint8(A1);
                A2 = av_clip_uint8(A2);

        yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
                      r, g, b, y, target, hasAlpha);
                                 const int16_t *ubuf[2], const int16_t *vbuf[2],
                                 int yalpha, int uvalpha, int y,

    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1],
                  *vbuf0 = vbuf[0], *vbuf1 = vbuf[1],
                  *abuf0 = hasAlpha ? abuf[0] : NULL,
                  *abuf1 = hasAlpha ? abuf[1] : NULL;
    int yalpha1 = 4096 - yalpha;
    int uvalpha1 = 4096 - uvalpha;

    for (i = 0; i < ((dstW + 1) >> 1); i++) {
        int Y1 = (buf0[i * 2] * yalpha1 + buf1[i * 2] * yalpha) >> 19;
        int Y2 = (buf0[i * 2 + 1] * yalpha1 + buf1[i * 2 + 1] * yalpha) >> 19;
        int U = (ubuf0[i] * uvalpha1 + ubuf1[i] * uvalpha) >> 19;
        int V = (vbuf0[i] * uvalpha1 + vbuf1[i] * uvalpha) >> 19;

        const void *r, *g, *b;

        Y1 = av_clip_uint8(Y1);
        Y2 = av_clip_uint8(Y2);
        U = av_clip_uint8(U);
        V = av_clip_uint8(V);

            A1 = (abuf0[i * 2 ] * yalpha1 + abuf1[i * 2 ] * yalpha) >> 19;
            A2 = (abuf0[i * 2 + 1] * yalpha1 + abuf1[i * 2 + 1] * yalpha) >> 19;
            A1 = av_clip_uint8(A1);
            A2 = av_clip_uint8(A2);

        yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
                      r, g, b, y, target, hasAlpha);
                                 const int16_t *ubuf[2], const int16_t *vbuf[2],

    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];

    if (uvalpha < 2048) {
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = buf0[i * 2] >> 7;
            int Y2 = buf0[i * 2 + 1] >> 7;
            int U = ubuf0[i] >> 7;
            int V = vbuf0[i] >> 7;

            const void *r, *g, *b;

            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
            U = av_clip_uint8(U);
            V = av_clip_uint8(V);

                A1 = abuf0[i * 2 ] >> 7;
                A2 = abuf0[i * 2 + 1] >> 7;
                A1 = av_clip_uint8(A1);
                A2 = av_clip_uint8(A2);

            yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
                          r, g, b, y, target, hasAlpha);

        const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
        for (i = 0; i < ((dstW + 1) >> 1); i++) {
            int Y1 = buf0[i * 2] >> 7;
            int Y2 = buf0[i * 2 + 1] >> 7;
            int U = (ubuf0[i] + ubuf1[i]) >> 8;
            int V = (vbuf0[i] + vbuf1[i]) >> 8;

            const void *r, *g, *b;

            Y1 = av_clip_uint8(Y1);
            Y2 = av_clip_uint8(Y2);
            U = av_clip_uint8(U);
            V = av_clip_uint8(V);

                A1 = abuf0[i * 2 ] >> 7;
                A2 = abuf0[i * 2 + 1] >> 7;
                A1 = av_clip_uint8(A1);
                A2 = av_clip_uint8(A2);

            yuv2rgb_write(dest, i, Y1, Y2, hasAlpha ? A1 : 0, hasAlpha ? A2 : 0,
                          r, g, b, y, target, hasAlpha);
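/* YUV2RGBWRAPPERX/YUV2RGBWRAPPER generate the _X_c (and _2_c/_1_c) entry
 * points for one packed RGB format by binding fmt and hasAlpha and forwarding
 * to the shared templates above. */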
#define YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _X_c(SwsContext *c, const int16_t *lumFilter, \
                                const int16_t **lumSrc, int lumFilterSize, \
                                const int16_t *chrFilter, const int16_t **chrUSrc, \
                                const int16_t **chrVSrc, int chrFilterSize, \
                                const int16_t **alpSrc, uint8_t *dest, int dstW, \
                                int y) \
{ \
    name ## base ## _X_c_template(c, lumFilter, lumSrc, lumFilterSize, \
                                  chrFilter, chrUSrc, chrVSrc, chrFilterSize, \
                                  alpSrc, dest, dstW, y, fmt, hasAlpha); \
}

#define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \
YUV2RGBWRAPPERX(name, base, ext, fmt, hasAlpha) \
static void name ## ext ## _2_c(SwsContext *c, const int16_t *buf[2], \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf[2], uint8_t *dest, int dstW, \
                                int yalpha, int uvalpha, int y) \
{ \
    name ## base ## _2_c_template(c, buf, ubuf, vbuf, abuf, \
                                  dest, dstW, yalpha, uvalpha, y, fmt, hasAlpha); \
} \
 \
static void name ## ext ## _1_c(SwsContext *c, const int16_t *buf0, \
                                const int16_t *ubuf[2], const int16_t *vbuf[2], \
                                const int16_t *abuf0, uint8_t *dest, int dstW, \
                                int uvalpha, int y) \
{ \
    name ## base ## _1_c_template(c, buf0, ubuf, vbuf, abuf0, dest, \
                                  dstW, uvalpha, y, fmt, hasAlpha); \
}
#if CONFIG_SWSCALE_ALPHA
                          const int16_t **lumSrc, int lumFilterSize,
                          const int16_t *chrFilter, const int16_t **chrUSrc,
                          const int16_t **chrVSrc, int chrFilterSize,
                          const int16_t **alpSrc, uint8_t *dest,

    for (i = 0; i < dstW; i++) {

        for (j = 0; j < lumFilterSize; j++) {
            Y += lumSrc[j][i] * lumFilter[j];

        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];

            for (j = 0; j < lumFilterSize; j++) {
                A += alpSrc[j][i] * lumFilter[j];

                A = av_clip_uint8(A);

        Y -= c->yuv2rgb_y_offset;
        Y *= c->yuv2rgb_y_coeff;

        R = Y + V*c->yuv2rgb_v2r_coeff;
        G = Y + V*c->yuv2rgb_v2g_coeff + U*c->yuv2rgb_u2g_coeff;
        B = Y + U*c->yuv2rgb_u2b_coeff;
        if ((R | G | B) & 0xC0000000) {
            R = av_clip_uintp2(R, 30);
            G = av_clip_uintp2(G, 30);
            B = av_clip_uintp2(B, 30);

            dest[0] = hasAlpha ? A : 255;

            dest[3] = hasAlpha ? A : 255;

            dest[0] = hasAlpha ? A : 255;

        case AV_PIX_FMT_BGR24:

            dest[3] = hasAlpha ? A : 255;
#if CONFIG_SWSCALE_ALPHA
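/* Planar RGB output: R, G and B are computed per pixel from the filtered
 * Y/U/V with the context's yuv2rgb coefficients and written to separate
 * planes, as 8-bit samples or, via dest16, as higher-depth samples. Note the
 * GBR plane order: plane 0 receives G, plane 1 B and plane 2 R. */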
                          const int16_t **lumSrc, int lumFilterSize,
                          const int16_t *chrFilter, const int16_t **chrUSrc,
                          const int16_t **chrVSrc, int chrFilterSize,
                          const int16_t **alpSrc, uint8_t **dest,

    uint16_t **dest16 = (uint16_t**)dest;

    for (i = 0; i < dstW; i++) {
        int U = (1 << 9) - (128 << 19);
        int V = (1 << 9) - (128 << 19);

        for (j = 0; j < lumFilterSize; j++)
            Y += lumSrc[j][i] * lumFilter[j];

        for (j = 0; j < chrFilterSize; j++) {
            U += chrUSrc[j][i] * chrFilter[j];
            V += chrVSrc[j][i] * chrFilter[j];

            for (j = 0; j < lumFilterSize; j++)
                A += alpSrc[j][i] * lumFilter[j];

                A = av_clip_uint8(A);

        Y -= c->yuv2rgb_y_offset;
        Y *= c->yuv2rgb_y_coeff;

        R = Y + V * c->yuv2rgb_v2r_coeff;
        G = Y + V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff;
        B = Y + U * c->yuv2rgb_u2b_coeff;

        if ((R | G | B) & 0xC0000000) {
            R = av_clip_uintp2(R, 30);
            G = av_clip_uintp2(G, 30);
            B = av_clip_uintp2(B, 30);

            dest16[0][i] = G >> SH;
            dest16[1][i] = B >> SH;
            dest16[2][i] = R >> SH;

            dest[0][i] = G >> 22;
            dest[1][i] = B >> 22;
            dest[2][i] = R >> 22;

    for (i = 0; i < dstW; i++) {
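/* Output-function selection: the planar writers are picked by destination bit
 * depth and endianness (16-bit, 9/10-bit, or plain 8-bit). */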
        *yuv2planeX = isBE(dstFormat) ? yuv2planeX_16BE_c : yuv2planeX_16LE_c;
        *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_16BE_c : yuv2plane1_16LE_c;

            *yuv2planeX = isBE(dstFormat) ? yuv2planeX_9BE_c : yuv2planeX_9LE_c;
            *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_9BE_c : yuv2plane1_9LE_c;

            *yuv2planeX = isBE(dstFormat) ? yuv2planeX_10BE_c : yuv2planeX_10LE_c;
            *yuv2plane1 = isBE(dstFormat) ? yuv2plane1_10BE_c : yuv2plane1_10LE_c;

        *yuv2planeX = yuv2planeX_8_c;
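/* Full horizontal chroma interpolation: only the _X (multi-tap) writers are
 * selected here; whether the variant that also writes alpha is chosen depends
 * on CONFIG_SWSCALE_ALPHA. */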
        switch (dstFormat) {
        case AV_PIX_FMT_RGBA:
            *yuv2packedX = yuv2rgba32_full_X_c;
#if CONFIG_SWSCALE_ALPHA
                *yuv2packedX = yuv2rgba32_full_X_c;
                *yuv2packedX = yuv2rgbx32_full_X_c;
        case AV_PIX_FMT_ARGB:
            *yuv2packedX = yuv2argb32_full_X_c;
#if CONFIG_SWSCALE_ALPHA
                *yuv2packedX = yuv2argb32_full_X_c;
                *yuv2packedX = yuv2xrgb32_full_X_c;

            *yuv2packedX = yuv2bgra32_full_X_c;
#if CONFIG_SWSCALE_ALPHA
                *yuv2packedX = yuv2bgra32_full_X_c;
                *yuv2packedX = yuv2bgrx32_full_X_c;
        case AV_PIX_FMT_ABGR:
            *yuv2packedX = yuv2abgr32_full_X_c;
#if CONFIG_SWSCALE_ALPHA
                *yuv2packedX = yuv2abgr32_full_X_c;
                *yuv2packedX = yuv2xbgr32_full_X_c;

            *yuv2packedX = yuv2rgb24_full_X_c;
            *yuv2packedX = yuv2bgr24_full_X_c;
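/* Without full chroma interpolation each packed format gets dedicated
 * _1 (single input line), _2 (two-line blend) and _X (multi-tap) writers. */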
        switch (dstFormat) {

            *yuv2packed1 = yuv2rgb48le_1_c;
            *yuv2packed2 = yuv2rgb48le_2_c;
            *yuv2packedX = yuv2rgb48le_X_c;

            *yuv2packed1 = yuv2rgb48be_1_c;
            *yuv2packed2 = yuv2rgb48be_2_c;
            *yuv2packedX = yuv2rgb48be_X_c;

            *yuv2packed1 = yuv2bgr48le_1_c;
            *yuv2packed2 = yuv2bgr48le_2_c;
            *yuv2packedX = yuv2bgr48le_X_c;

            *yuv2packed1 = yuv2bgr48be_1_c;
            *yuv2packed2 = yuv2bgr48be_2_c;
            *yuv2packedX = yuv2bgr48be_X_c;

        case AV_PIX_FMT_RGB32:

            *yuv2packed1 = yuv2rgb32_1_c;
            *yuv2packed2 = yuv2rgb32_2_c;
            *yuv2packedX = yuv2rgb32_X_c;
#if CONFIG_SWSCALE_ALPHA
                *yuv2packed1 = yuv2rgba32_1_c;
                *yuv2packed2 = yuv2rgba32_2_c;
                *yuv2packedX = yuv2rgba32_X_c;

                *yuv2packed1 = yuv2rgbx32_1_c;
                *yuv2packed2 = yuv2rgbx32_2_c;
                *yuv2packedX = yuv2rgbx32_X_c;

            *yuv2packed1 = yuv2rgb32_1_1_c;
            *yuv2packed2 = yuv2rgb32_1_2_c;
            *yuv2packedX = yuv2rgb32_1_X_c;
#if CONFIG_SWSCALE_ALPHA
                *yuv2packed1 = yuv2rgba32_1_1_c;
                *yuv2packed2 = yuv2rgba32_1_2_c;
                *yuv2packedX = yuv2rgba32_1_X_c;

                *yuv2packed1 = yuv2rgbx32_1_1_c;
                *yuv2packed2 = yuv2rgbx32_1_2_c;
                *yuv2packedX = yuv2rgbx32_1_X_c;

            *yuv2packed1 = yuv2rgb24_1_c;
            *yuv2packed2 = yuv2rgb24_2_c;
            *yuv2packedX = yuv2rgb24_X_c;

            *yuv2packed1 = yuv2bgr24_1_c;
            *yuv2packed2 = yuv2bgr24_2_c;
            *yuv2packedX = yuv2bgr24_X_c;

            *yuv2packed1 = yuv2rgb16_1_c;
            *yuv2packed2 = yuv2rgb16_2_c;
            *yuv2packedX = yuv2rgb16_X_c;

            *yuv2packed1 = yuv2rgb15_1_c;
            *yuv2packed2 = yuv2rgb15_2_c;
            *yuv2packedX = yuv2rgb15_X_c;

            *yuv2packed1 = yuv2rgb12_1_c;
            *yuv2packed2 = yuv2rgb12_2_c;
            *yuv2packedX = yuv2rgb12_X_c;

            *yuv2packed1 = yuv2rgb8_1_c;
            *yuv2packed2 = yuv2rgb8_2_c;
            *yuv2packedX = yuv2rgb8_X_c;

            *yuv2packed1 = yuv2rgb4_1_c;
            *yuv2packed2 = yuv2rgb4_2_c;
            *yuv2packedX = yuv2rgb4_X_c;

            *yuv2packed1 = yuv2rgb4b_1_c;
            *yuv2packed2 = yuv2rgb4b_2_c;
            *yuv2packedX = yuv2rgb4b_X_c;

    switch (dstFormat) {

        *yuv2packed1 = yuv2monowhite_1_c;
        *yuv2packed2 = yuv2monowhite_2_c;
        *yuv2packedX = yuv2monowhite_X_c;

        *yuv2packed1 = yuv2monoblack_1_c;
        *yuv2packed2 = yuv2monoblack_2_c;
        *yuv2packedX = yuv2monoblack_X_c;

        *yuv2packed1 = yuv2yuyv422_1_c;
        *yuv2packed2 = yuv2yuyv422_2_c;
        *yuv2packedX = yuv2yuyv422_X_c;

        *yuv2packed1 = yuv2uyvy422_1_c;
        *yuv2packed2 = yuv2uyvy422_2_c;
        *yuv2packedX = yuv2uyvy422_X_c;