/* Maximum Huffman code length (in bits) used by the VP6 tables.
 * NOTE(review): the leading "41" is an extraction artifact — the original
 * file's line number fused into the text; present throughout this chunk. */
41 #define VP6_MAX_HUFF_SIZE 12
/* NOTE(review): non-contiguous fragment of a VP6 frame-header parser.
 * The fused line numbers jump (50, 56, 62, ...), so interior statements are
 * missing from this view; long statements are also wrapped mid-expression.
 * Code left byte-identical — comments only. */
50 int parse_filter_info = 0;
/* bit 0 of the first header byte: coefficients carried in a separate
 * partition (coeff_offset is then read from the header below) */
56 int separated_coeff = buf[0] & 1;
62 sub_version = buf[1] >> 3;
/* bits 1-2 of the second byte select the loop-filter header variant */
65 s->filter_header = buf[1] & 0x06;
/* presumably the keyframe path: 16-bit big-endian coefficient offset at
 * buf+2 — TODO confirm against the missing surrounding branch */
70 if (separated_coeff || !s->filter_header) {
71 coeff_offset =
AV_RB16(buf+2) - 2;
/* (re)allocate / resize when the macroblock grid no longer matches the
 * coded dimensions (16x16 macroblocks) */
85 if (!s->macroblocks ||
86 16*cols != s->avctx->coded_width ||
87 16*rows != s->avctx->coded_height) {
/* without extradata, accept the new size only if it is the aligned
 * display size — avoids clobbering a container-signalled crop */
88 if (s->avctx->extradata_size == 0 &&
89 FFALIGN(s->avctx->width, 16) == 16 * cols &&
90 FFALIGN(s->avctx->height, 16) == 16 * rows) {
94 s->avctx->coded_width = 16 * cols;
95 s->avctx->coded_height = 16 * rows;
/* one extradata byte packs horizontal (high nibble) and vertical
 * (low nibble) crop amounts */
101 if (s->avctx->extradata_size == 1) {
102 s->avctx->width -= s->avctx->extradata[0] >> 4;
103 s->avctx->height -= s->avctx->extradata[0] & 0x0F;
112 parse_filter_info = s->filter_header;
115 s->sub_version = sub_version;
117 if (!s->sub_version || !s->avctx->coded_width || !s->avctx->coded_height)
/* second coeff_offset read uses buf+1 (vs buf+2 above) — presumably the
 * inter-frame header is one byte shorter; TODO confirm */
120 if (separated_coeff || !s->filter_header) {
121 coeff_offset =
AV_RB16(buf+1) - 2;
128 if (s->filter_header) {
130 if (s->deblock_filtering)
132 if (s->sub_version > 7)
137 if (parse_filter_info) {
140 s->sample_variance_threshold =
vp56_rac_gets(c, 5) << vrt_shift;
147 if (s->sub_version > 7)
/* fixed filter selection for old sub-versions — TODO confirm 16 is the
 * "bilinear only" sentinel */
150 s->filter_selection = 16;
158 buf_size -= coeff_offset;
/* NOTE(review): fragment — builds the inverse of coeff_reorder: for each
 * reorder rank i (outer loop over i is missing from this view), find the
 * scan positions mapped to it and append them to coeff_index_to_pos. */
164 if (s->use_huffman) {
182 s->modelp->coeff_index_to_pos[0] = 0;
184 for (pos=1; pos<64; pos++)
185 if (s->modelp->coeff_reorder[pos] == i)
186 s->modelp->coeff_index_to_pos[idx++] = pos;
/* NOTE(review): fragment — loop headers only, bodies missing from this
 * view.  comp iterates over the two motion-vector components (x, y);
 * node indexes probability-model entries (7 and 8 entries respectively). */
213 for (comp=0; comp<2; comp++) {
220 for (comp=0; comp<2; comp++)
221 for (node=0; node<7; node++)
225 for (comp=0; comp<2; comp++)
226 for (node=0; node<8; node++)
/* NOTE(review): qsort-style comparator fragment (signature line missing).
 * Orders Nodes by ascending count, scaled by 16 so the tie-break on the
 * symbol (descending, note b - a) never outweighs a count difference. */
234 const Node *
a = va, *
b = vb;
235 return (a->
count - b->count)*16 + (b->sym - a->
sym);
/* NOTE(review): fragment of a Huffman-tree count builder.  Each parent
 * count tmp[i].count is split between two children in proportion to the
 * 8-bit probability coeff_model[i] / 256.  The "+ !a" / "+ !b" terms clamp
 * a zero share up to 1 so no node ends up with a zero count. */
246 for (i=0; i<size-1; i++) {
247 a = tmp[i].
count * coeff_model[i] >> 8;
248 b = tmp[i].
count * (255 - coeff_model[i]) >> 8;
249 nodes[map[2*i ]].
count = a + !
a;
250 nodes[map[2*i+1]].
count = b + !
b;
/* NOTE(review): fragment of the coefficient-model parser.  Loop bodies are
 * missing from this view; only the nesting structure (plane type pt,
 * coefficient token ct, coefficient group cg, model node) is visible. */
264 int node, cg, ctx, pos;
/* default all probabilities to 0x80 (= 1/2) */
268 memset(def_prob, 0x80,
sizeof(def_prob));
270 for (pt=0; pt<2; pt++)
271 for (node=0; node<11; node++)
280 for (pos=1; pos<64; pos++)
286 for (cg=0; cg<2; cg++)
287 for (node=0; node<14; node++)
291 for (ct=0; ct<3; ct++)
292 for (pt=0; pt<2; pt++)
293 for (cg=0; cg<6; cg++)
294 for (node=0; node<11; node++)
/* Huffman mode: (re)build the per-context VLC tables from the models */
302 if (s->use_huffman) {
303 for (pt=0; pt<2; pt++) {
310 for (ct=0; ct<3; ct++)
311 for (cg = 0; cg < 6; cg++)
314 &s->ract_vlc[pt][ct][cg]))
/* reset the cached runs of null coefficients */
317 memset(s->nb_null, 0,
sizeof(s->nb_null));
320 for (pt=0; pt<2; pt++)
321 for (ctx=0; ctx<3; ctx++)
322 for (node=0; node<5; node++)
/* NOTE(review): fragment of motion-vector candidate selection / adjustment
 * decoding.  prob_order fixes the bit-plane read order; the final line
 * reads a variable-length magnitude (2+val extra bits) from the bitstream. */
335 if (s->vector_candidate_pos < 2)
336 *vect = s->vector_candidate[0];
338 for (comp=0; comp<2; comp++) {
342 static const uint8_t prob_order[] = {0, 1, 2, 7, 6, 5, 4};
343 for (i=0; i<
sizeof(prob_order); i++) {
344 int j = prob_order[i];
377 val = 6+val +
get_bits(&s->gb, 2+val);
/* NOTE(review): fragment of the Huffman-coded coefficient decoder.
 * Interior lines are missing; comments restricted to what is visible. */
387 int coeff, sign, coeff_idx;
/* per macroblock: 6 blocks (4 luma + 2 chroma) */
391 for (b=0; b<6; b++) {
/* start with the DC VLC for this plane type */
394 vlc_coeff = &s->dccv_vlc[
pt];
396 for (coeff_idx = 0;;) {
/* consume a previously-cached run of null coefficients first */
398 if (coeff_idx<2 && s->nb_null[coeff_idx][pt]) {
399 s->nb_null[coeff_idx][
pt]--;
408 int pt = (coeff_idx >= 6);
409 run +=
get_vlc2(&s->gb, s->runv_vlc[pt].table, 9, 3);
415 }
else if (coeff == 11) {
/* escape: read the magnitude tail; token <= 9 uses coeff-4 bits,
 * larger tokens use a fixed 11 bits */
422 coeff2 +=
get_bits(&s->gb, coeff <= 9 ? coeff - 4 : 11);
423 ct = 1 + (coeff2 > 1);
/* branchless conditional negation: (x ^ -s) + s == s ? -x : x */
425 coeff2 = (coeff2 ^ -sign) + sign;
427 coeff2 *= s->dequant_ac;
/* store in natural order via the scan permutation */
429 s->block_coeff[
b][permute[idx]] = coeff2;
/* switch to the AC VLC selected by plane type / token / group */
436 vlc_coeff = &s->ract_vlc[
pt][ct][cg];
/* NOTE(review): fragment of the range-coder coefficient decoder (the
 * non-Huffman path).  Interior lines missing from this view. */
446 uint8_t *model1, *model2, *model3;
447 int coeff, sign, coeff_idx;
448 int b, i, cg, idx, ctx;
451 for (b=0; b<6; b++) {
/* DC context from the above neighbour's not_null_dc flag (the left
 * neighbour term is presumably on a missing line — TODO confirm) */
458 + s->above_blocks[s->above_block_idx[
b]].not_null_dc;
/* branchless conditional negation: (x ^ -s) + s == s ? -x : x */
484 coeff = (coeff ^ -sign) + sign;
486 coeff *= s->dequant_ac;
488 s->block_coeff[
b][permute[idx]] = coeff;
500 for (run=9, i=0; i<6; i++)
/* record whether this block's DC was non-zero for future contexts */
512 s->above_blocks[s->above_block_idx[
b]].not_null_dc = !!s->block_coeff[
b][0];
/* NOTE(review): block-variance fragment.  Samples an 8x8 block at every
 * other pixel (16 samples) and returns (16*sum(x^2) - sum(x)^2) >> 8,
 * i.e. a fixed-point sample variance.  The "sum +=" accumulation line
 * (original line 523) is missing from this view. */
518 int sum = 0, square_sum = 0;
521 for (y=0; y<8; y+=2) {
522 for (x=0; x<8; x+=2) {
524 square_sum += src[x]*src[x];
528 return (16*square_sum - sum*sum) >> 8;
/* NOTE(review): fragment of a 4-tap interpolation filter applied along one
 * direction; delta is the per-tap step (presumably 1 for horizontal,
 * stride for vertical — TODO confirm).  7-bit weights, rounded (+64, >>7)
 * and clipped to 8 bits. */
532 int delta,
const int16_t *weights)
536 for (y=0; y<8; y++) {
537 for (x=0; x<8; x++) {
538 dst[x] = av_clip_uint8(( src[x-delta ] * weights[0]
539 + src[x ] * weights[1]
540 + src[x+delta ] * weights[2]
541 + src[x+2*delta] * weights[3] + 64) >> 7);
/* NOTE(review): fragment of a separable bilinear filter: horizontal pass
 * into the edge-emulation scratch buffer (9 rows, to feed the vertical
 * taps), then vertical pass into dst (8 rows), via the shared H.264 chroma
 * MC routine. */
549 int stride,
int h_weight,
int v_weight)
551 uint8_t *tmp = s->edge_emu_buffer+16;
552 s->h264chroma.put_h264_chroma_pixels_tab[0](tmp, src,
stride, 9, h_weight, 0);
553 s->h264chroma.put_h264_chroma_pixels_tab[0](dst, tmp,
stride, 8, 0, v_weight);
/* NOTE(review): fragment of the motion-compensation filter dispatcher.
 * Several lines here are visibly truncated by extraction (e.g. the
 * variance comparison has lost its left-hand side); code left as found. */
557 int offset1,
int offset2,
int stride,
/* fractional motion-vector parts, masked to the sub-pel precision */
561 int x8 = mv.
x &
mask;
562 int y8 = mv.
y &
mask;
567 filter4 = s->filter_mode;
/* long vectors fall back from the 4-tap filter */
569 if (s->max_vector_length &&
570 (
FFABS(mv.
x) > s->max_vector_length ||
571 FFABS(mv.
y) > s->max_vector_length)) {
573 }
/* NOTE(review): garbled — original condition presumably compared a block
 * variance against the threshold; left-hand side lost in extraction */
else if (s->sample_variance_threshold
575 < s->sample_variance_threshold)) {
/* pick source offset ordering depending on vector direction and flip */
581 if ((y8 && (offset2-offset1)*s->flip<0) || (!y8 && offset1 > offset2)) {
/* diagonal 4-tap: bias the source pointer by the sign of x^y */
593 s->vp56dsp.vp6_filter_diag4(dst, src+offset1+((mv.
x^mv.
y)>>31), stride,
/* bilinear fallback via the H.264 chroma MC routine */
599 s->h264chroma.put_h264_chroma_pixels_tab[0](dst, src + offset1,
stride, 8, x8, y8);
/* NOTE(review): fragment — loop headers only (bodies missing); iterates
 * the same pt/ct/cg table dimensions used when building the coefficient
 * VLCs above, presumably to free them on decoder teardown — TODO confirm. */
633 for (pt=0; pt<2; pt++) {
636 for (ct=0; ct<3; ct++)
637 for (cg=0; cg<6; cg++)
/* NOTE(review): fragments of three codec registration structs (only the
 * .priv_data_size designated initializers are visible here); all three
 * share the same VP56Context private context. */
648 .priv_data_size =
sizeof(VP56Context),
661 .priv_data_size =
sizeof(VP56Context),
674 .priv_data_size =
sizeof(VP56Context),