48 #define MB_INTRA_VLC_BITS 9
54 static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
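/* These entries are 2^k - 1; the table appears to be one of the offs_tab candidates
 * used further down when decoding interlaced MV differentials, where the VLC index
 * selects the base offset to which the extra bits read from the stream are added. */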
94 int topleft_mb_pos, top_mb_pos;
95 int stride_y, fieldtx = 0;
111 v_dist = (16 - fieldtx) >> (fieldtx == 0);
136 v_dist = fieldtx ? 15 : 8;
158 #define inc_blk_idx(idx) do { \
160 if (idx >= v->n_allocated_blks) \
179 for (j = 0; j < 2; j++) {
212 for (j = 0; j < 2; j++) {
229 for (j = 0; j < 2; j++) {
245 for (j = 0; j < 2; j++) {
256 for (j = 0; j < 2; j++) {
350 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
353 uint8_t (*luty)[256], (*lutuv)[256];
361 mx = s->mv[dir][0][0];
362 my = s->mv[dir][0][1];
366 for (i = 0; i < 4; i++) {
372 uvmx = (mx + ((mx & 3) == 3)) >> 1;
373 uvmy = (my + ((my & 3) == 3)) >> 1;
385 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
386 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
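/* The two lines above appear to derive the chroma MV from the quarter-pel luma MV
 * (halving with a bias for the x.75 phase) and then, presumably under FASTUVMC,
 * round it to a half-pel position by pulling odd values towards zero.
 * Worked example: mx = 7 (1.75 luma px) -> uvmx = (7 + 1) >> 1 = 4; uvmx = 5 would
 * then be rounded down to 4, while uvmx = -5 would become -4. */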
413 if (!srcY || !srcU) {
418 src_x = s->mb_x * 16 + (mx >> 2);
419 src_y = s->mb_y * 16 + (my >> 2);
420 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
421 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
424 src_x = av_clip( src_x, -16, s->mb_width * 16);
425 src_y = av_clip( src_y, -16, s->mb_height * 16);
426 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
427 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
435 srcY += src_y * s->linesize + src_x;
454 || (unsigned)(src_y - 1) > v_edge_pos - (my&3) - 16 - 3) {
467 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
471 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
480 for (j = 0; j < 17 + s->mspel * 2; j++) {
481 for (i = 0; i < 17 + s->mspel * 2; i++)
482 src[i] = ((src[i] - 128) >> 1) + 128;
487 for (j = 0; j < 9; j++) {
488 for (i = 0; i < 9; i++) {
489 src[i] = ((src[i] - 128) >> 1) + 128;
490 src2[i] = ((src2[i] - 128) >> 1) + 128;
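/* ((x - 128) >> 1) + 128 halves the amplitude around mid-grey, e.g. 200 -> 164 and
 * 60 -> 94; this is presumably the range-reduction (RANGEREDFRM) adjustment applied
 * to the reference samples before motion compensation. */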
502 for (j = 0; j < 17 + s->mspel * 2; j++) {
504 for (i = 0; i < 17 + s->mspel * 2; i++)
505 src[i] = luty[f][src[i]];
510 for (j = 0; j < 9; j++) {
512 for (i = 0; i < 9; i++) {
513 src[i] = lutuv[f][src[i]];
514 src2[i] = lutuv[f][src2[i]];
524 dxy = ((my & 3) << 2) | (mx & 3);
531 dxy = (my & 2) | ((mx & 2) >> 1);
540 uvmx = (uvmx & 3) << 1;
541 uvmy = (uvmy & 3) << 1;
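/* The chroma MC routines seem to take their sub-pel phase in eighth-pel units, so the
 * remaining quarter-pel fraction (0..3) is doubled here to 0, 2, 4 or 6. */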
554 if (c < d)
return (FFMIN(b, d) + FFMAX(a, c)) / 2;
557 if (c < d)
return (FFMIN(a, d) + FFMAX(b, c)) / 2;
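/* median4() evidently returns the mean of the two middle values of its four arguments,
 * e.g. median4(1, 9, 4, 6): a < b and c < d, so (FFMIN(9, 6) + FFMAX(1, 4)) / 2 = 5;
 * this is the usual VC-1 predictor when all four luma MVs of a macroblock are usable. */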
568 int dxy, mx, my, src_x, src_y;
580 mx = s->mv[dir][n][0];
581 my = s->mv[dir][n][1];
610 int same_count = 0, opp_count = 0, k;
611 int chosen_mv[2][4][2], f;
613 for (k = 0; k < 4; k++) {
615 chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
616 chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
620 f = opp_count > same_count;
621 switch (f ? opp_count : same_count) {
623 tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
624 chosen_mv[f][2][0], chosen_mv[f][3][0]);
625 ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
626 chosen_mv[f][2][1], chosen_mv[f][3][1]);
629 tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
630 ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
633 tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
634 ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
639 for (k = 0; k < 4; k++)
651 qx = (s->mb_x * 16) + (mx >> 2);
652 qy = (s->mb_y * 8) + (my >> 3);
657 mx -= 4 * (qx - width);
660 else if (qy > height + 1)
661 my -= 8 * (qy - height - 1);
665 off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
667 off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
669 src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
671 src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
673 src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
676 src_x = av_clip(src_x, -16, s->mb_width * 16);
677 src_y = av_clip(src_y, -16, s->mb_height * 16);
690 srcY += src_y * s->linesize + src_x;
694 if (fieldmv && !(src_y & 1))
696 if (fieldmv && (src_y & 1) && src_y < 4)
701 || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
706 9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
707 src_x - s->mspel, src_y - (s->mspel << fieldmv),
716 for (j = 0; j < 9 + s->mspel * 2; j++) {
717 for (i = 0; i < 9 + s->mspel * 2; i++)
718 src[i] = ((src[i] - 128) >> 1) + 128;
728 for (j = 0; j < 9 + s->mspel * 2; j++) {
730 for (i = 0; i < 9 + s->mspel * 2; i++)
731 src[i] = luty[f][src[i]];
739 dxy = ((my & 3) << 2) | (mx & 3);
745 dxy = (my & 2) | ((mx & 2) >> 1);
756 static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
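/* count[] is a 4-bit population count, so count[idx] below gives how many of the four
 * luma blocks are excluded from the chroma MV derivation (intra blocks, or MVs from
 * the non-dominant field), e.g. idx = 0xA -> 2 excluded blocks. */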
758 idx = ((a[3] != flag) << 3)
759 | ((a[2] != flag) << 2)
760 | ((a[1] != flag) << 1)
763 *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
764 *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
766 } else if (count[idx] == 1) {
769 *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
770 *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
773 *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
774 *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
777 *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
778 *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
781 *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
782 *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
785 } else if (count[idx] == 2) {
787 for (i = 0; i < 3; i++)
792 for (i = t1 + 1; i < 4; i++)
797 *tx = (mvx[t1] + mvx[t2]) / 2;
798 *ty = (mvy[t1] + mvy[t2]) / 2;
813 int uvmx, uvmy, uvsrc_x, uvsrc_y;
814 int k, tx = 0, ty = 0;
815 int mvx[4], mvy[4], intra[4], mv_f[4];
827 for (k = 0; k < 4; k++) {
828 mvx[k] = s->mv[dir][k][0];
829 mvy[k] = s->mv[dir][k][1];
847 if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
849 valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
857 uvmx = (tx + ((tx & 3) == 3)) >> 1;
858 uvmy = (ty + ((ty & 3) == 3)) >> 1;
864 uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
865 uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
869 uvmy += 2 - 4 * chroma_ref_type;
871 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
872 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
875 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
876 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
910 if (chroma_ref_type) {
918 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
919 || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
922 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
926 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
938 for (j = 0; j < 9; j++) {
939 for (i = 0; i < 9; i++) {
940 src[i] = ((src[i] - 128) >> 1) + 128;
941 src2[i] = ((src2[i] - 128) >> 1) + 128;
954 for (j = 0; j < 9; j++) {
955 int f = v->field_mode ? chroma_ref_type : ((j + uvsrc_y) & 1);
956 for (i = 0; i < 9; i++) {
957 src[i] = lutuv[f][src[i]];
958 src2[i] = lutuv[f][src2[i]];
967 uvmx = (uvmx & 3) << 1;
968 uvmy = (uvmy & 3) << 1;
985 int uvsrc_x, uvsrc_y;
986 int uvmx_field[4], uvmy_field[4];
989 static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
990 int v_dist = fieldmv ? 1 : 4;
1003 for (i = 0; i < 4; i++) {
1004 int d = i < 2 ? dir: dir2;
1005 tx = s->mv[d][i][0];
1006 uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
1007 ty = s->mv[d][i][1];
1009 uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
1011 uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
1014 for (i = 0; i < 4; i++) {
1015 off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
1016 uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
1017 uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
1021 if (i < 2 ? dir : dir2) {
1032 uvmx_field[i] = (uvmx_field[i] & 3) << 1;
1033 uvmy_field[i] = (uvmy_field[i] & 3) << 1;
1035 if (fieldmv && !(uvsrc_y & 1))
1037 if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
1040 || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
1041 || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
1042 || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
1045 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1049 5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1061 for (j = 0; j < 5; j++) {
1062 int f = (uvsrc_y + (j << fieldmv)) & 1;
1063 for (i = 0; i < 5; i++) {
1064 src[i] = lutuv[f][src[i]];
1065 src2[i] = lutuv[f][src2[i]];
1103 #define GET_MQUANT() \
1104 if (v->dquantfrm) { \
1106 if (v->dqprofile == DQPROFILE_ALL_MBS) { \
1107 if (v->dqbilevel) { \
1108 mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
1110 mqdiff = get_bits(gb, 3); \
1112 mquant = v->pq + mqdiff; \
1114 mquant = get_bits(gb, 5); \
1117 if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1118 edges = 1 << v->dqsbedge; \
1119 else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1120 edges = (3 << v->dqsbedge) % 15; \
1121 else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
1123 if ((edges&1) && !s->mb_x) \
1124 mquant = v->altpq; \
1125 if ((edges&2) && s->first_slice_line) \
1126 mquant = v->altpq; \
1127 if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
1128 mquant = v->altpq; \
1129 if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
1130 mquant = v->altpq; \
1131 if (!mquant || mquant > 31) { \
1132 av_log(v->s.avctx, AV_LOG_ERROR, \
1133 "Overriding invalid mquant %d\n", mquant); \
1145 #define GET_MVDATA(_dmv_x, _dmv_y) \
1146 index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
1147 VC1_MV_DIFF_VLC_BITS, 2); \
1149 mb_has_coeffs = 1; \
1152 mb_has_coeffs = 0; \
1155 _dmv_x = _dmv_y = 0; \
1156 } else if (index == 35) { \
1157 _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1158 _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1159 } else if (index == 36) { \
1164 index1 = index % 6; \
1165 if (!s->quarter_sample && index1 == 5) val = 1; \
1167 if (size_table[index1] - val > 0) \
1168 val = get_bits(gb, size_table[index1] - val); \
1170 sign = 0 - (val&1); \
1171 _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1173 index1 = index / 6; \
1174 if (!s->quarter_sample && index1 == 5) val = 1; \
1176 if (size_table[index1] - val > 0) \
1177 val = get_bits(gb, size_table[index1] - val); \
1179 sign = 0 - (val & 1); \
1180 _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign; \
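/* The (sign ^ x) - sign idiom above folds the LSB of val into the sign of the decoded
 * differential: sign = 0 - (val & 1) is -1 for odd val and 0 for even val, and
 * (-1 ^ x) - (-1) equals -x while (0 ^ x) - 0 leaves x unchanged.
 * Worked example: val = 5, offset = 2 -> x = (5 >> 1) + 2 = 4, result = -4. */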
1184 int *dmv_y, int *pred_flag)
1187 int extend_x = 0, extend_y = 0;
1191 const int* offs_tab;
1208 extend_x = extend_y = 1;
1217 *pred_flag = *dmv_y & 1;
1218 *dmv_y = (*dmv_y + *pred_flag) >> 1;
1220 *dmv_y = (*dmv_y + (*dmv_y & 1)) >> 1;
1229 index1 = (index + 1) % 9;
1231 val = get_bits(gb, index1 + extend_x);
1232 sign = 0 -(val & 1);
1233 *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
1240 index1 = (index + 1) / 9;
1241 if (index1 > v->numref) {
1243 sign = 0 - (val & 1);
1244 *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1247 if (v->numref && pred_flag)
1248 *pred_flag = index1 & 1;
1254 int scaledvalue, refdist;
1255 int scalesame1, scalesame2;
1256 int scalezone1_x, zone1offset_x;
1273 if (FFABS(n) < scalezone1_x)
1274 scaledvalue = (n * scalesame1) >> 8;
1277 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1279 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1287 int scaledvalue, refdist;
1288 int scalesame1, scalesame2;
1289 int scalezone1_y, zone1offset_y;
1306 if (FFABS(n) < scalezone1_y)
1307 scaledvalue = (n * scalesame1) >> 8;
1310 scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1312 scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
1317 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1319 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1324 int scalezone1_x, zone1offset_x;
1325 int scaleopp1, scaleopp2, brfd;
1337 if (FFABS(n) < scalezone1_x)
1338 scaledvalue = (n * scaleopp1) >> 8;
1341 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1343 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1351 int scalezone1_y, zone1offset_y;
1352 int scaleopp1, scaleopp2, brfd;
1364 if (FFABS(n) < scalezone1_y)
1365 scaledvalue = (n * scaleopp1) >> 8;
1368 scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1370 scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1374 return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1376 return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1383 int brfd, scalesame;
1397 n = (n * scalesame >> 8) << hpel;
1404 int refdist, scaleopp;
1421 n = (n * scaleopp >> 8) << hpel;
1428 int mv1, int r_x, int r_y, uint8_t* is_intra,
1429 int pred_flag, int dir)
1432 int xy, wrap, off = 0;
1436 int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1437 int opposite, a_f, b_f, c_f;
1438 int16_t field_predA[2];
1439 int16_t field_predB[2];
1440 int16_t field_predC[2];
1441 int a_valid, b_valid, c_valid;
1442 int hybridmv_thresh, y_bias = 0;
1490 off = (s->mb_x > 0) ? -1 : 1;
1505 b_valid = a_valid && (s->mb_width > 1);
1506 c_valid = s->mb_x || (n == 1 || n == 3);
1508 a_valid = a_valid && !is_intra[xy - wrap];
1509 b_valid = b_valid && !is_intra[xy - wrap + off];
1510 c_valid = c_valid && !is_intra[xy - 1];
1515 num_oppfield += a_f;
1516 num_samefield += 1 - a_f;
1517 field_predA[0] = A[0];
1518 field_predA[1] = A[1];
1520 field_predA[0] = field_predA[1] = 0;
1525 num_oppfield += b_f;
1526 num_samefield += 1 - b_f;
1527 field_predB[0] = B[0];
1528 field_predB[1] = B[1];
1530 field_predB[0] = field_predB[1] = 0;
1535 num_oppfield += c_f;
1536 num_samefield += 1 - c_f;
1537 field_predC[0] = C[0];
1538 field_predC[1] = C[1];
1540 field_predC[0] = field_predC[1] = 0;
1550 if (num_samefield <= num_oppfield)
1551 opposite = 1 - pred_flag;
1553 opposite = pred_flag;
1558 if (a_valid && !a_f) {
1559 field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
1560 field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
1562 if (b_valid && !b_f) {
1563 field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
1564 field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
1566 if (c_valid && !c_f) {
1567 field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
1568 field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
1573 if (a_valid && a_f) {
1574 field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
1575 field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
1577 if (b_valid && b_f) {
1578 field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
1579 field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
1581 if (c_valid && c_f) {
1582 field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
1583 field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
1590 px = field_predA[0];
1591 py = field_predA[1];
1592 } else if (c_valid) {
1593 px = field_predC[0];
1594 py = field_predC[1];
1595 } else if (b_valid) {
1596 px = field_predB[0];
1597 py = field_predB[1];
1603 if (num_samefield + num_oppfield > 1) {
1604 px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
1605 py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
1611 qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1612 qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1616 if (qx + px < -60) px = -60 - qx;
1617 if (qy + py < -60) py = -60 - qy;
1619 if (qx + px < -28) px = -28 - qx;
1620 if (qy + py < -28) py = -28 - qy;
1622 if (qx + px > X) px = X - qx;
1623 if (qy + py > Y) py = Y - qy;
1628 hybridmv_thresh = 32;
1629 if (a_valid && c_valid) {
1630 if (is_intra[xy - wrap])
1633 sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
1634 if (sum > hybridmv_thresh) {
1636 px = field_predA[0];
1637 py = field_predA[1];
1639 px = field_predC[0];
1640 py = field_predC[1];
1643 if (is_intra[xy - 1])
1646 sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
1647 if (sum > hybridmv_thresh) {
1649 px = field_predA[0];
1650 py = field_predA[1];
1652 px = field_predC[0];
1653 py = field_predC[1];
1682 int mvn, int r_x, int r_y, uint8_t* is_intra, int dir)
1685 int xy, wrap, off = 0;
1686 int A[2], B[2], C[2];
1688 int a_valid = 0, b_valid = 0, c_valid = 0;
1689 int field_a, field_b, field_c;
1690 int total_valid, num_samefield, num_oppfield;
1691 int pos_c, pos_b, n_adj;
1719 off = ((n == 0) || (n == 1)) ? 1 : -1;
1721 if (s->mb_x || (n == 1) || (n == 3)) {
1741 B[0] = B[1] = C[0] = C[1] = 0;
1749 n_adj = (n & 2) | (n & 1);
1803 total_valid = a_valid + b_valid + c_valid;
1805 if (!s->mb_x && !(n == 1 || n == 3)) {
1810 B[0] = B[1] = C[0] = C[1] = 0;
1817 if (total_valid >= 2) {
1820 } else if (total_valid) {
1821 if (a_valid) { px = A[0]; py = A[1]; }
1822 if (b_valid) { px = B[0]; py = B[1]; }
1823 if (c_valid) { px = C[0]; py = C[1]; }
1829 field_a = (A[1] & 4) ? 1 : 0;
1833 field_b = (B[1] & 4) ? 1 : 0;
1837 field_c = (C[1] & 4) ? 1 : 0;
1841 num_oppfield = field_a + field_b + field_c;
1842 num_samefield = total_valid - num_oppfield;
1843 if (total_valid == 3) {
1844 if ((num_samefield == 3) || (num_oppfield == 3)) {
1847 } else if (num_samefield >= num_oppfield) {
1850 px = !field_a ? A[0] : B[0];
1851 py = !field_a ? A[1] : B[1];
1853 px = field_a ? A[0] : B[0];
1854 py = field_a ? A[1] : B[1];
1856 } else if (total_valid == 2) {
1857 if (num_samefield >= num_oppfield) {
1858 if (!field_a && a_valid) {
1861 } else if (!field_b && b_valid) {
1864 } else if (c_valid) {
1869 if (field_a && a_valid) {
1872 } else if (field_b && b_valid) {
1875 } else if (c_valid) {
1881 } else if (total_valid == 1) {
1882 px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
1883 py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
1898 } else if (mvn == 2) {
1901 s->mv[dir][n + 1][0] = s->mv[dir][n][0];
1902 s->mv[dir][n + 1][1] = s->mv[dir][n][1];
1913 int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1921 mx = s->mv[1][0][0];
1922 my = s->mv[1][0][1];
1923 uvmx = (mx + ((mx & 3) == 3)) >> 1;
1924 uvmy = (my + ((my & 3) == 3)) >> 1;
1931 uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1932 uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1938 src_x = s->mb_x * 16 + (mx >> 2);
1939 src_y = s->mb_y * 16 + (my >> 2);
1940 uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1941 uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1944 src_x = av_clip( src_x, -16, s->mb_width * 16);
1945 src_y = av_clip( src_y, -16, s->mb_height * 16);
1946 uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1947 uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1955 srcY += src_y * s->linesize + src_x;
1972 || (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
1973 || (unsigned)(src_y - 1) > v_edge_pos - (my & 3) - 16 - 3) {
1986 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1990 uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1999 for (j = 0; j < 17 + s->mspel * 2; j++) {
2000 for (i = 0; i < 17 + s->mspel * 2; i++)
2001 src[i] = ((src[i] - 128) >> 1) + 128;
2006 for (j = 0; j < 9; j++) {
2007 for (i = 0; i < 9; i++) {
2008 src[i] = ((src[i] - 128) >> 1) + 128;
2009 src2[i] = ((src2[i] - 128) >> 1) + 128;
2023 for (j = 0; j < 17 + s->mspel * 2; j++) {
2025 for (i = 0; i < 17 + s->mspel * 2; i++)
2026 src[i] = luty[f][src[i]];
2031 for (j = 0; j < 9; j++) {
2033 for (i = 0; i < 9; i++) {
2034 src[i] = lutuv[f][src[i]];
2035 src2[i] = lutuv[f][src2[i]];
2048 dxy = ((my & 3) << 2) | (mx & 3);
2055 dxy = (my & 2) | ((mx & 2) >> 1);
2065 uvmx = (uvmx & 3) << 1;
2066 uvmy = (uvmy & 3) << 1;
2080 #if B_FRACTION_DEN==256
2084 return 2 * ((value * n + 255) >> 9);
2085 return (value * n + 128) >> 8;
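/* This looks like the BFRACTION scaling used for direct-mode MVs: the co-located vector
 * is multiplied by n/256 (the B-frame's relative temporal position) with rounding, and
 * the first form rounds up to an even, i.e. half-pel aligned, result.
 * E.g. value = 12, n = 128 -> (12 * 128 + 128) >> 8 = 6. */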
2098 int direct, int mode)
2115 int direct, int mvtype)
2118 int xy, wrap, off = 0;
2150 s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2151 s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2152 s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2153 s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2169 if (!s->mb_x) C[0] = C[1] = 0;
2178 } else if (s->mb_x) {
2188 qx = (s->mb_x << 5);
2189 qy = (s->mb_y << 5);
2192 if (qx + px < -28) px = -28 - qx;
2193 if (qy + py < -28) py = -28 - qy;
2194 if (qx + px > X) px = X - qx;
2195 if (qy + py > Y) py = Y - qy;
2197 qx = (s->mb_x << 6);
2198 qy = (s->mb_y << 6);
2201 if (qx + px < -60) px = -60 - qx;
2202 if (qy + py < -60) py = -60 - qy;
2203 if (qx + px > X) px = X - qx;
2204 if (qy + py > Y) py = Y - qy;
2209 if (is_intra[xy - wrap])
2222 if (is_intra[xy - 2])
2238 s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2239 s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
2257 } else if (s->mb_x) {
2267 qx = (s->mb_x << 5);
2268 qy = (s->mb_y << 5);
2271 if (qx + px < -28) px = -28 - qx;
2272 if (qy + py < -28) py = -28 - qy;
2273 if (qx + px > X) px = X - qx;
2274 if (qy + py > Y) py = Y - qy;
2276 qx = (s->mb_x << 6);
2277 qy = (s->mb_y << 6);
2280 if (qx + px < -60) px = -60 - qx;
2281 if (qy + py < -60) py = -60 - qy;
2282 if (qx + px > X) px = X - qx;
2283 if (qy + py > Y) py = Y - qy;
2288 if (is_intra[xy - wrap])
2301 if (is_intra[xy - 2])
2318 s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2319 s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2334 int total_opp, k, f;
2349 f = (total_opp > 2) ? 1 : 0;
2351 s->mv[0][0][0] = s->mv[0][0][1] = 0;
2352 s->mv[1][0][0] = s->mv[1][0][1] = 0;
2356 for (k = 0; k < 4; k++) {
2373 if (n == 3 || mv1) {
2378 if (n == 3 || mv1) {
2394 int16_t **dc_val_ptr, int *dir_ptr)
2398 static const uint16_t dcpred[32] = {
2399 -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2400 114, 102, 93, 85, 79, 73, 68, 64,
2401 60, 57, 54, 51, 49, 47, 45, 43,
2402 41, 39, 38, 37, 35, 34, 33
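/* dcpred[scale] is roughly 1024/scale rounded to the nearest integer (1024/3 = 341.3
 * -> 341); it appears to seed the DC predictor with a neutral value whenever a
 * neighbouring block is unavailable, as in the b = a = dcpred[scale] lines below. */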
2416 b = dc_val[ - 1 - wrap];
2417 a = dc_val[ - wrap];
2419 if (pq < 9 || !overlap) {
2422 b = a = dcpred[scale];
2423 if (s->mb_x == 0 && (n != 1 && n != 3))
2424 b = c = dcpred[scale];
2429 if (s->mb_x == 0 && (n != 1 && n != 3))
2433 if (abs(a - b) <= abs(b - c)) {
2442 *dc_val_ptr = &dc_val[0];
2459 int a_avail, int c_avail,
2460 int16_t **dc_val_ptr, int *dir_ptr)
2475 b = dc_val[ - 1 - wrap];
2476 a = dc_val[ - wrap];
2480 if (dqscale_index < 0)
2482 if (c_avail && (n != 1 && n != 3)) {
2487 if (a_avail && (n != 2 && n != 3)) {
2492 if (a_avail && c_avail && (n != 3)) {
2503 if (a_avail && c_avail) {
2504 if (abs(a - b) <= abs(b - c)) {
2511 } else if (a_avail) {
2514 } else if (c_avail) {
2523 *dc_val_ptr = &dc_val[0];
2572 int *value, int codingset)
2638 int coded, int codingset)
2642 int dc_pred_dir = 0;
2645 int16_t *ac_val, *ac_val2;
2659 if (dcdiff == 119 ) {
2662 else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2666 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2667 else if (v->pq == 2)
2668 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2693 int last = 0, skip, value;
2720 block[zz_table[i++]] = value;
2726 for (k = 1; k < 8; k++)
2729 for (k = 1; k < 8; k++)
2734 for (k = 1; k < 8; k++) {
2740 for (k = 1; k < 64; k++)
2744 block[k] += (block[k] < 0) ? -v->pq : v->pq;
2758 memset(ac_val2, 0, 16 * 2);
2762 memcpy(ac_val2, ac_val, 8 * 2);
2766 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2772 for (k = 1; k < 8; k++) {
2778 for (k = 1; k < 8; k++) {
2779 block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2801 int coded, int codingset, int mquant)
2805 int dc_pred_dir = 0;
2808 int16_t *ac_val, *ac_val2;
2827 if (dcdiff == 119 ) {
2829 if (mquant == 1) dcdiff = get_bits(gb, 10);
2830 else if (mquant == 2) dcdiff = get_bits(gb, 9);
2834 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2835 else if (mquant == 2)
2836 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2857 if (!a_avail && !c_avail)
2862 scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2870 if ( dc_pred_dir && c_avail && mb_pos)
2872 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2874 if ( dc_pred_dir && n == 1)
2876 if (!dc_pred_dir && n == 2)
2882 int last = 0, skip, value;
2907 block[zz_table[i++]] = value;
2913 if (q2 && q1 != q2) {
2914 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2915 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2920 for (k = 1; k < 8; k++)
2923 for (k = 1; k < 8; k++)
2928 for (k = 1; k < 8; k++)
2931 for (k = 1; k < 8; k++)
2937 for (k = 1; k < 8; k++) {
2943 for (k = 1; k < 64; k++)
2947 block[k] += (block[k] < 0) ? -mquant : mquant;
2950 if (use_pred) i = 63;
2954 memset(ac_val2, 0, 16 * 2);
2957 memcpy(ac_val2, ac_val, 8 * 2);
2958 if (q2 && q1 != q2) {
2959 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2960 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2963 for (k = 1; k < 8; k++)
2964 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2969 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2970 if (q2 && q1 != q2) {
2971 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2972 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2975 for (k = 1; k < 8; k++)
2976 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2984 for (k = 1; k < 8; k++) {
2990 for (k = 1; k < 8; k++) {
2991 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3013 int coded, int mquant, int codingset)
3017 int dc_pred_dir = 0;
3020 int16_t *ac_val, *ac_val2;
3031 mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
3048 if (dcdiff == 119 ) {
3050 if (mquant == 1) dcdiff = get_bits(gb, 10);
3051 else if (mquant == 2) dcdiff = get_bits(gb, 9);
3055 dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
3056 else if (mquant == 2)
3057 dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
3064 dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3079 if (!a_avail) dc_pred_dir = 1;
3080 if (!c_avail) dc_pred_dir = 0;
3081 if (!a_avail && !c_avail) use_pred = 0;
3085 scale = mquant * 2 + v->halfpq;
3093 if (dc_pred_dir && c_avail && mb_pos)
3095 if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3097 if ( dc_pred_dir && n == 1)
3099 if (!dc_pred_dir && n == 2)
3101 if (n == 3) q2 = q1;
3104 int last = 0, skip, value;
3113 block[v->zz_8x8[0][i++]] = value;
3117 block[v->zz_8x8[2][i++]] = value;
3119 block[v->zz_8x8[3][i++]] = value;
3121 block[v->zzi_8x8[i++]] = value;
3129 if (q2 && q1 != q2) {
3130 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3131 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3136 for (k = 1; k < 8; k++)
3139 for (k = 1; k < 8; k++)
3144 for (k = 1; k < 8; k++)
3147 for (k = 1; k < 8; k++)
3153 for (k = 1; k < 8; k++) {
3159 for (k = 1; k < 64; k++)
3163 block[k] += (block[k] < 0) ? -mquant : mquant;
3166 if (use_pred) i = 63;
3170 memset(ac_val2, 0, 16 * 2);
3173 memcpy(ac_val2, ac_val, 8 * 2);
3174 if (q2 && q1 != q2) {
3175 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3176 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3179 for (k = 1; k < 8; k++)
3180 ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3185 memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3186 if (q2 && q1 != q2) {
3187 q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3188 q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3191 for (k = 1; k < 8; k++)
3192 ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3200 for (k = 1; k < 8; k++) {
3206 for (k = 1; k < 8; k++) {
3207 block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3223 int mquant, int ttmb, int first_block,
3224 uint8_t *dst, int linesize, int skip_block,
3231 int scale, off, idx, last, skip, value;
3232 int ttblk = ttmb & 7;
3244 && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3254 scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3279 block[idx] = value * scale;
3281 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3293 pat = ~subblkpat & 0xF;
3294 for (j = 0; j < 4; j++) {
3295 last = subblkpat & (1 << (3 - j));
3297 off = (j & 1) * 4 + (j & 2) * 16;
3307 block[idx + off] = value * scale;
3309 block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3311 if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3320 pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3321 for (j = 0; j < 2; j++) {
3322 last = subblkpat & (1 << (1 - j));
3331 idx = v->zz_8x4[i++] + off;
3334 block[idx] = value * scale;
3336 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3338 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3347 pat = ~(subblkpat * 5) & 0xF;
3348 for (j = 0; j < 2; j++) {
3349 last = subblkpat & (1 << (1 - j));
3358 idx = v->zz_4x8[i++] + off;
3361 block[idx] = value * scale;
3363 block[idx] += (block[idx] < 0) ? -mquant : mquant;
3365 if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3375 *ttmb_out |= ttblk << (n * 4);
3388 block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3390 block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
3394 if (block_num > 3) {
3395 dst = s->dest[block_num - 3];
3397 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
3403 if (block_num > 3) {
3404 bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
3405 bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3409 bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
3410 : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
3411 bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
3417 if (bottom_is_intra & 1 || block_is_intra & 1 ||
3418 mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3421 idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3433 dst -= 4 * linesize;
3436 idx = (block_cbp | (block_cbp >> 2)) & 3;
3452 block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3454 block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3458 if (block_num > 3) {
3459 dst = s->dest[block_num - 3] - 8 * linesize;
3461 dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
3467 if (block_num > 3) {
3472 right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3473 : (mb_cbp >> ((block_num + 1) * 4));
3475 : (mb_is_intra >> ((block_num + 1) * 4));
3478 if (block_is_intra & 1 || right_is_intra & 1 ||
mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3481 idx = ((right_cbp >> 1) | block_cbp) & 5;
3496 idx = (block_cbp | (block_cbp >> 1)) & 5;
3513 for (i = 0; i < 6; i++) {
3520 for (i = 0; i < 6; i++) {
3526 for (i = 0; i < 6; i++) {
3543 int ttmb = v->ttfrm;
3545 int mb_has_coeffs = 1;
3549 int first_block = 1;
3551 int skipped, fourmv;
3552 int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3577 if (s->mb_intra && !mb_has_coeffs) {
3581 } else if (mb_has_coeffs) {
3597 for (i = 0; i < 6; i++) {
3600 val = ((cbp >> (5 - i)) & 1);
3601 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3608 if (i == 1 || i == 3 || s->mb_x)
3617 for (j = 0; j < 64; j++)
3618 s->block[i][j] <<= 1;
3620 s->dest[dst_idx] + off,
3629 block_cbp |= 0xF << (i << 2);
3630 block_intra |= 1 << i;
3635 block_cbp |= pat << (i << 2);
3636 if (!v->ttmbf && ttmb < 8)
3643 for (i = 0; i < 6; i++) {
3654 int intra_count = 0, coded_inter = 0;
3655 int is_intra[6], is_coded[6];
3658 for (i = 0; i < 6; i++) {
3659 val = ((cbp >> (5 - i)) & 1);
3674 is_coded[i] = mb_has_coeffs;
3677 is_intra[i] = (intra_count >= 3);
3684 coded_inter = !is_intra[i] & is_coded[i];
3688 if (!intra_count && !coded_inter)
3695 for (i = 0; i < 6; i++)
3708 if (!v->ttmbf && coded_inter)
3710 for (i = 0; i < 6; i++) {
3712 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3719 if (i == 1 || i == 3 || s->mb_x)
3728 for (j = 0; j < 64; j++)
3729 s->block[i][j] <<= 1;
3731 s->dest[dst_idx] + off,
3740 block_cbp |= 0xF << (i << 2);
3741 block_intra |= 1 << i;
3742 } else if (is_coded[i]) {
3744 first_block, s->dest[dst_idx] + off,
3748 block_cbp |= pat << (i << 2);
3749 if (!v->ttmbf && ttmb < 8)
3757 for (i = 0; i < 6; i++) {
3761 for (i = 0; i < 4; i++) {
3787 int ttmb = v->ttfrm;
3789 int mb_has_coeffs = 1;
3792 int first_block = 1;
3794 int skipped, fourmv = 0, twomv = 0;
3795 int block_cbp = 0, pat, block_tt = 0;
3796 int idx_mbmode = 0, mvbp;
3797 int stride_y, fieldtx;
3841 for (i = 0; i < 4; i++) {
3847 for (i = 0; i < 6; i++)
3860 for (i = 0; i < 6; i++) {
3863 val = ((cbp >> (5 - i)) & 1);
3868 if (i == 1 || i == 3 || s->mb_x)
3877 off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3883 s->dest[dst_idx] + off,
3901 for (i = 0; i < 6; i++)
3908 for (i = 0; i < 6; i++) {
3911 val = ((mvbp >> (3 - i)) & 1);
3917 } else if (i == 4) {
3950 if (!v->ttmbf && cbp)
3952 for (i = 0; i < 6; i++) {
3955 val = ((cbp >> (5 - i)) & 1);
3957 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3959 off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3962 first_block, s->dest[dst_idx] + off,
3965 block_cbp |= pat << (i << 2);
3966 if (!v->ttmbf && ttmb < 8)
3974 for (i = 0; i < 6; i++) {
4000 int ttmb = v->ttfrm;
4002 int mb_has_coeffs = 1;
4005 int first_block = 1;
4008 int block_cbp = 0, pat, block_tt = 0;
4014 if (idx_mbmode <= 1) {
4025 mb_has_coeffs = idx_mbmode & 1;
4029 for (i = 0; i < 6; i++) {
4033 val = ((cbp >> (5 - i)) & 1);
4037 if (i == 1 || i == 3 || s->mb_x)
4045 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4047 s->dest[dst_idx] + off,
4056 if (idx_mbmode <= 5) {
4057 dmv_x = dmv_y = pred_flag = 0;
4058 if (idx_mbmode & 1) {
4063 mb_has_coeffs = !(idx_mbmode & 2);
4066 for (i = 0; i < 6; i++) {
4068 dmv_x = dmv_y = pred_flag = 0;
4069 val = ((v->fourmvbp >> (3 - i)) & 1);
4078 mb_has_coeffs = idx_mbmode & 1;
4086 if (!v->ttmbf && cbp) {
4090 for (i = 0; i < 6; i++) {
4093 val = ((cbp >> (5 - i)) & 1);
4094 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4097 first_block, s->dest[dst_idx] + off,
4101 block_cbp |= pat << (i << 2);
4102 if (!v->ttmbf && ttmb < 8) ttmb = -1;
4122 int ttmb = v->ttfrm;
4123 int mb_has_coeffs = 0;
4126 int first_block = 1;
4128 int skipped, direct;
4129 int dmv_x[2], dmv_y[2];
4144 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4145 for (i = 0; i < 6; i++) {
4154 dmv_x[1] = dmv_x[0];
4155 dmv_y[1] = dmv_y[0];
4168 dmv_x[0] = dmv_y[0] = 0;
4172 for (i = 0; i < 6; i++)
4179 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4189 dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4191 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4193 if (!mb_has_coeffs && !s->mb_intra) {
4196 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4199 if (s->mb_intra && !mb_has_coeffs) {
4208 if (!mb_has_coeffs) {
4211 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4217 vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4229 for (i = 0; i < 6; i++) {
4232 val = ((cbp >> (5 - i)) & 1);
4233 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4240 if (i == 1 || i == 3 || s->mb_x)
4249 for (j = 0; j < 64; j++)
4250 s->block[i][j] <<= 1;
4252 s->dest[dst_idx] + off,
4257 first_block, s->dest[dst_idx] + off,
4260 if (!v->ttmbf && ttmb < 8)
4277 int ttmb = v->ttfrm;
4278 int mb_has_coeffs = 0;
4280 int first_block = 1;
4283 int dmv_x[2], dmv_y[2], pred_flag[2];
4285 int idx_mbmode, interpmvp;
4291 if (idx_mbmode <= 1) {
4302 mb_has_coeffs = idx_mbmode & 1;
4306 for (i = 0; i < 6; i++) {
4309 val = ((cbp >> (5 - i)) & 1);
4314 if (i == 1 || i == 3 || s->mb_x)
4323 for (j = 0; j < 64; j++)
4324 s->block[i][j] <<= 1;
4325 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4327 s->dest[dst_idx] + off,
4340 if (idx_mbmode <= 5) {
4341 dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4342 pred_flag[0] = pred_flag[1] = 0;
4367 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4368 dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
4372 mb_has_coeffs = !(idx_mbmode & 2);
4378 for (i = 0; i < 6; i++) {
4380 dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4381 dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4382 val = ((v->fourmvbp >> (3 - i)) & 1);
4393 mb_has_coeffs = idx_mbmode & 1;
4401 if (!v->ttmbf && cbp) {
4405 for (i = 0; i < 6; i++) {
4408 val = ((cbp >> (5 - i)) & 1);
4409 off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4412 first_block, s->dest[dst_idx] + off,
4415 if (!v->ttmbf && ttmb < 8)
4433 int ttmb = v->ttfrm;
4435 int mb_has_coeffs = 1;
4438 int first_block = 1;
4440 int skipped, direct, twomv = 0;
4441 int block_cbp = 0, pat, block_tt = 0;
4442 int idx_mbmode = 0, mvbp;
4443 int stride_y, fieldtx;
4487 for (i = 1; i < 4; i += 2) {
4494 for (i = 1; i < 4; i++) {
4504 for (i = 0; i < 4; i++) {
4512 for (i = 0; i < 6; i++)
4525 for (i = 0; i < 6; i++) {
4528 val = ((cbp >> (5 - i)) & 1);
4533 if (i == 1 || i == 3 || s->mb_x)
4543 off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
4549 s->dest[dst_idx] + off,
4585 for (i = 0; i < 6; i++)
4592 for (i = 0; i < 4; i++) {
4604 for (i = 0; i < 4; i++) {
4607 val = ((mvbp >> (3 - i)) & 1);
4650 for (i = 0; i < 2; i++) {
4680 for (i = 0; i < 2; i++) {
4690 if (!v->ttmbf && cbp)
4692 for (i = 0; i < 6; i++) {
4695 val = ((cbp >> (5 - i)) & 1);
4697 off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4699 off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
4702 first_block, s->dest[dst_idx] + off,
4705 block_cbp |= pat << (i << 2);
4706 if (!v->ttmbf && ttmb < 8)
4714 for (i = 0; i < 6; i++) {
4736 for (i = 0; i < 2; i++) {
4748 for (i = 0; i < 2; i++) {
4818 dst[0] = s->dest[0];
4819 dst[1] = dst[0] + 8;
4821 dst[3] = dst[2] + 8;
4822 dst[4] = s->dest[1];
4823 dst[5] = s->dest[2];
4835 for (k = 0; k < 6; k++) {
4836 val = ((cbp >> (5 - k)) & 1);
4843 cbp |= val << (5 - k);
4852 for (j = 0; j < 64; j++)
4853 s->block[k][j] <<= 1;
4859 for (j = 0; j < 64; j++)
4993 for (k = 0; k < 6; k++) {
4994 val = ((cbp >> (5 - k)) & 1);
5001 cbp |= val << (5 - k);
5111 if (apply_loop_filter) {
5247 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5263 int effect_type, effect_flag;
5264 int effect_pcount1, effect_pcount2;
5265 int effect_params1[15], effect_params2[10];
5273 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
5280 c[2] = get_fp_val(gb);
5284 c[0] = c[4] = get_fp_val(gb);
5285 c[2] = get_fp_val(gb);
5288 c[0] = get_fp_val(gb);
5289 c[2] = get_fp_val(gb);
5290 c[4] = get_fp_val(gb);
5293 c[0] = get_fp_val(gb);
5294 c[1] = get_fp_val(gb);
5295 c[2] = get_fp_val(gb);
5296 c[3] = get_fp_val(gb);
5297 c[4] = get_fp_val(gb);
5300 c[5] = get_fp_val(gb);
5302 c[6] = get_fp_val(gb);
5312 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5313 vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
5314 if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
5317 for (i = 0; i < 7; i++)
5319 sd->coefs[sprite][i] / (1<<16),
5320 (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
5326 switch (sd->effect_pcount1 = get_bits(gb, 4)) {
5328 vc1_sprite_parse_transform(gb, sd->effect_params1);
5331 vc1_sprite_parse_transform(gb, sd->effect_params1);
5332 vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
5335 for (i = 0; i < sd->effect_pcount1; i++)
5336 sd->effect_params1[i] = get_fp_val(gb);
5338 if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
5341 for (i = 0; i < sd->effect_pcount1; i++)
5343 sd->effect_params1[i] / (1 << 16),
5344 (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
5348 sd->effect_pcount2 = get_bits(gb, 16);
5349 if (sd->effect_pcount2 > 10) {
5352 } else if (sd->effect_pcount2) {
5355 while (++i < sd->effect_pcount2) {
5356 sd->effect_params2[i] = get_fp_val(gb);
5358 sd->effect_params2[i] / (1 << 16),
5359 (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
5374 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
5376 int i, plane, row, sprite;
5377 int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
5379 int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
5383 for (i = 0; i < 2; i++) {
5384 xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
5385 xadv[i] = sd->coefs[i][0];
5389 yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
5392 alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
5401 for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5404 int ycoord = yoff[sprite] + yadv[sprite] * row;
5405 int yline = ycoord >> 16;
5407 ysub[sprite] = ycoord & 0xFFFF;
5413 if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
5414 src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
5416 src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + next_line;
5418 if (sr_cache[sprite][0] != yline) {
5419 if (sr_cache[sprite][1] == yline) {
5421 FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
5423 v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
5424 sr_cache[sprite][0] = yline;
5427 if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
5429 iplane + next_line, xoff[sprite],
5430 xadv[sprite], width);
5431 sr_cache[sprite][1] = yline + 1;
5433 src_h[sprite][0] = v->sr_rows[sprite][0];
5434 src_h[sprite][1] = v->sr_rows[sprite][1];
5442 memcpy(dst, src_h[0][0], width);
5445 if (ysub[0] && ysub[1]) {
5447 src_h[1][0], src_h[1][1], ysub[1], alpha, width);
5448 } else if (ysub[0]) {
5450 src_h[1][0], alpha, width);
5451 } else if (ysub[1]) {
5453 src_h[0][0], (1<<16)-1-alpha, width);
5461 for (i = 0; i < 2; i++) {
5477 vc1_parse_sprites(v, gb, &sd);
5495 vc1_draw_sprites(v, &sd);
5511 if (f && f->data[0])
5515 plane ? 128 : 0, f->linesize[plane]);
5572 for (i = 0; i < 4; i++)
5598 for (i = 0; i < 64; i++) {
5599 #define transpose(x) ((x >> 3) | ((x & 7) << 3))
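/* For a scan position in 0..63 this swaps the 3-bit row and column fields, i.e. it
 * transposes the coordinate within an 8x8 block: x = 13 (row 1, col 5) maps to
 * (13 >> 3) | ((13 & 7) << 3) = 1 | 40 = 41 (row 5, col 1). */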
5655 } else if (count < 0) {
5662 int size, buf2_size;
5664 int seq_initialized = 0, ep_initialized = 0;
5674 for (; next < end; start = next) {
5676 size = next - start - 4;
5687 seq_initialized = 1;
5699 if (!seq_initialized || !ep_initialized) {
5753 for (i = 0; i < 4; i++)
5785 int buf_size = avpkt->size, n_slices = 0, i, ret;
5790 const uint8_t *buf_start = buf;
5791 int mb_height, n_slices1;
5796 } *slices = NULL, *tmp;
5818 const uint8_t *start, *end, *next;
5822 for (start = buf, end = buf + buf_size; next < end; start = next) {
5824 size = next - start - 4;
5825 if (size <= 0) continue;
5834 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5839 if (!slices[n_slices].buf)
5842 slices[n_slices].buf);
5847 slices[n_slices].mby_start = s->mb_height + 1 >> 1;
5848 n_slices1 = n_slices - 1;
5859 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5864 if (!slices[n_slices].buf)
5867 slices[n_slices].buf);
5870 slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5876 } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) {
5885 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5890 if (!slices[n_slices].buf)
5892 buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
5895 slices[n_slices].mby_start = s->mb_height + 1 >> 1;
5896 n_slices1 = n_slices - 1;
6019 v->bits = buf_size * 8;
6035 for (i = 0; i <= n_slices; i++) {
6036 if (i > 0 && slices[i - 1].mby_start >= mb_height) {
6039 "picture boundary (%d >= %d)\n", i,
6040 slices[i - 1].mby_start, mb_height);
6072 s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
6074 s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6076 s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6085 s->gb = slices[i].gb;
6115 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
6116 if (vc1_decode_sprites(v, &s->gb))
6138 for (i = 0; i < n_slices; i++)
6145 for (i = 0; i < n_slices; i++)
6161 #if CONFIG_VC1_DXVA2_HWACCEL
6164 #if CONFIG_VC1_VAAPI_HWACCEL
6167 #if CONFIG_VC1_VDPAU_HWACCEL
6185 .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6189 #if CONFIG_WMV3_DECODER
6201 .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6206 #if CONFIG_WMV3IMAGE_DECODER
6207 AVCodec ff_wmv3image_decoder = {
6208 .name = "wmv3image",
6217 .flush = vc1_sprite_flush,
6225 #if CONFIG_VC1IMAGE_DECODER
6226 AVCodec ff_vc1image_decoder = {
6236 .flush = vc1_sprite_flush,
static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
in the bitstream is reported as 00b
const int ff_vc1_ttblk_to_tt[3][8]
Table for conversion between TTBLK and TTMB.
op_pixels_func avg_vc1_mspel_pixels_tab[16]
#define VC1_TTBLK_VLC_BITS
void(* vc1_h_overlap)(uint8_t *src, int stride)
const struct AVCodec * codec
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n, int coded, int codingset)
Decode intra block in intra frames - should be faster than decode_intra_block.
discard all frames except keyframes
void ff_init_block_index(MpegEncContext *s)
#define VC1_IF_MBMODE_VLC_BITS
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
qpel_mc_func avg_qpel_pixels_tab[2][16]
static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n, int mquant, int ttmb, int first_block, uint8_t *dst, int linesize, int skip_block, int *ttmb_out)
Decode P block.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
This structure describes decoded (raw) audio or video data.
#define VC1_ICBPCY_VLC_BITS
static int vc1_decode_p_mb(VC1Context *v)
Decode one P-frame MB.
int k_x
Number of bits for MVs (depends on MV range)
void(* vc1_inv_trans_8x4)(uint8_t *dest, int line_size, int16_t *block)
int reffield
if numref = 0 (1 reference) then reffield decides which
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
void(* vc1_inv_trans_4x8)(uint8_t *dest, int line_size, int16_t *block)
void(* clear_block)(int16_t *block)
int coded_width
Bitstream width / height, may be different from width/height e.g.
int mv_type_is_raw
mv type mb plane is not coded
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
static av_always_inline int scaleforsame(VC1Context *v, int i, int n, int dim, int dir)
uint8_t dmvrange
Frame decoding info for interlaced picture.
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
#define AV_LOG_WARNING
Something somehow does not look correct.
static av_always_inline int scaleforopp_y(VC1Context *v, int n, int dir)
static const uint8_t vc1_index_decode_table[AC_MODES][185][2]
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
int16_t(*[3] ac_val)[16]
used for for mpeg4 AC prediction, all 3 arrays must be continuous
static const int vc1_last_decode_table[AC_MODES]
int tt_index
Index for Transform Type tables (to decode TTMB)
static void vc1_decode_p_blocks(VC1Context *v)
static void vc1_put_signed_blocks_clamped(VC1Context *v)
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
#define GET_MVDATA(_dmv_x, _dmv_y)
Get MV differentials.
#define VC1_2REF_MVDATA_VLC_BITS
void ff_er_frame_end(ERContext *s)
static void vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
Do motion compensation for 4-MV interlaced frame chroma macroblock (both U and V) ...
void(* sprite_v_double_onescale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1, const uint8_t *src2a, int alpha, int width)
int repeat_pict
When decoding, this signals how much the picture must be delayed.
void(* clear_blocks)(int16_t *blocks)
uint8_t rangeredfrm
Frame decoding info for S/M profiles only.
HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the b...
int ff_msmpeg4_decode_init(AVCodecContext *avctx)
void ff_print_debug_info(MpegEncContext *s, Picture *p)
Print debugging info for the given picture.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
void(* add_pixels_clamped)(const int16_t *block, uint8_t *pixels, int line_size)
uint8_t zz_8x8[4][64]
Zigzag table for TT_8x8, permuted for IDCT.
static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n, int coded, int mquant, int codingset)
Decode intra block in inter frames - more generic version than vc1_decode_i_block.
static void vc1_decode_b_blocks(VC1Context *v)
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
av_dlog(ac->avr,"%d samples - audio_convert: %s to %s (%s)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt), use_generic?ac->func_descr_generic:ac->func_descr)
uint8_t * mv_f[2]
0: MV obtained from same field, 1: opposite field
int fastuvmc
Rounding of qpel vector to hpel ? (not in Simple)
int end_mb_x
Horizontal macroblock limit (used only by mss2)
void(* vc1_v_loop_filter8)(uint8_t *src, int stride, int pq)
void ff_vc1_decode_blocks(VC1Context *v)
static void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
static int vc1_coded_block_pred(MpegEncContext *s, int n, uint8_t **coded_block_ptr)
enum AVDiscard skip_frame
#define VC1_4MV_BLOCK_PATTERN_VLC_BITS
static void vc1_apply_p_loop_filter(VC1Context *v)
const uint16_t ff_vc1_b_field_mvpred_scales[7][4]
struct AVHWAccel * hwaccel
Hardware accelerator in use.
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
int refdist
distance of the current picture from reference
uint8_t * acpred_plane
AC prediction flags bitplane.
static int decode(MimicContext *ctx, int quality, int num_coeffs, int is_iframe)
void void avpriv_request_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
static const AVProfile profiles[]
static int vc1_decode_b_mb_intfr(VC1Context *v)
Decode one B-frame MB (in interlaced frame B picture)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
void(* vc1_v_loop_filter4)(uint8_t *src, int stride, int pq)
int first_pic_header_flag
av_cold int ff_vc1_init_common(VC1Context *v)
Init VC-1 specific tables and VC1Context members.
void(* sprite_v_double_twoscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1, const uint8_t *src2a, const uint8_t *src2b, int offset2, int alpha, int width)
void(* vc1_inv_trans_8x8)(int16_t *b)
int interlace
Progressive/interlaced (RPTFTM syntax element)
int y_ac_table_index
Luma index from AC2FRM element.
qpel_mc_func(* qpel_put)[16]
int c_ac_table_index
AC coding set indexes.
const int ff_vc1_ac_sizes[AC_MODES]
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
void(* vc1_inv_trans_8x4_dc)(uint8_t *dest, int line_size, int16_t *block)
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
int ttfrm
Transform type info present at frame level.
Picture current_picture
copy of the current picture structure.
int codingset2
index of current table set from 11.8 to use for chroma block decoding
int16_t bfraction
Relative position % anchors=> how to scale MVs.
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
int profile
Sequence header data for all Profiles TODO: choose between ints, uint8_ts and monobit flags...
static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
static int get_bits_count(const GetBitContext *s)
void(* vc1_h_loop_filter8)(uint8_t *src, int stride, int pq)
static av_always_inline int scaleforsame_x(VC1Context *v, int n, int dir)
uint8_t * forward_mb_plane
bitplane for "forward" MBs
uint8_t last_luty[2][256]
int mb_height
number of MBs horizontally & vertically
int ff_vc1_decode_sequence_header(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb)
Decode Simple/Main Profiles sequence header.
static void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
Reconstruct motion vector for B-frame and do motion compensation.
uint8_t * over_flags_plane
Overflags bitplane.
static void vc1_decode_b_mb(VC1Context *v)
Decode one B-frame MB (in Main profile)
const int8_t ff_vc1_adv_interlaced_4x8_zz[32]
static void ff_update_block_index(MpegEncContext *s)
uint8_t last_lutuv[2][256]
lookup tables used for intensity compensation
uint8_t ttmbf
Transform type flag.
int k_y
Number of bits for MVs (depends on MV range)
static int get_bits_left(GetBitContext *gb)
int16_t * dc_val[3]
used for mpeg4 DC prediction, all 3 arrays must be continuous
int dmb_is_raw
direct mb plane is raw
static int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n, int16_t **dc_val_ptr, int *dir_ptr)
Get predicted DC value for I-frames only prediction dir: left=0, top=1.
void(* vc1_inv_trans_8x8_dc)(uint8_t *dest, int line_size, int16_t *block)
#define VC1_CBPCY_P_VLC_BITS
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int has_b_frames
Size of the frame reordering buffer in the decoder.
void av_free(void *ptr)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc(). ...
int overlap
overlapped transforms in use
in the bitstream is reported as 11b
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
#define CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
const int8_t ff_vc1_simple_progressive_4x4_zz[16]
void(* vc1_inv_trans_4x4)(uint8_t *dest, int line_size, int16_t *block)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
static void vc1_mc_1mv(VC1Context *v, int dir)
Do motion compensation over 1 macroblock Mostly adapted hpel_motion and qpel_motion from mpegvideo...
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
static av_cold int vc1_decode_init(AVCodecContext *avctx)
Initialize a VC1/WMV3 decoder.
#define GET_MQUANT()
Get macroblock-level quantizer scale.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
void av_log(void *avcl, int level, const char *fmt,...)
const char * name
Name of the codec implementation.
#define IS_MARKER(state, i, buf, buf_size)
int quarter_sample
1->qpel, 0->half pel ME/MC
int low_delay
no reordering needed / has no b-frames
qpel_mc_func put_qpel_pixels_tab[2][16]
void ff_mpv_common_end(MpegEncContext *s)
void(* vc1_v_loop_filter16)(uint8_t *src, int stride, int pq)
static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n, int dir)
static void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y, int mvn, int r_x, int r_y, uint8_t *is_intra, int dir)
Predict and set motion vector for interlaced frame picture MBs.
const uint8_t * zz_8x4
Zigzag scan table for TT_8x4 coding mode.
int res_rtm_flag
reserved, set to 1
void(* vc1_inv_trans_4x4_dc)(uint8_t *dest, int line_size, int16_t *block)
void(* put_pixels_clamped)(const int16_t *block, uint8_t *pixels, int line_size)
uint8_t * blk_mv_type
0: frame MV, 1: field MV (interlaced frame)
static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip, int *value, int codingset)
Decode one AC coefficient.
void ff_mpeg_flush(AVCodecContext *avctx)
void(* put_signed_pixels_clamped)(const int16_t *block, uint8_t *pixels, int line_size)
const int8_t ff_vc1_adv_interlaced_4x4_zz[16]
int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext *gb)
#define FF_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
static av_always_inline int scaleforopp(VC1Context *v, int n, int dim, int dir)
int cur_field_type
0: top, 1: bottom
const uint8_t ff_wmv1_scantable[WMV1_SCANTABLE_COUNT][64]
const uint8_t * zz_4x8
Zigzag scan table for TT_4x8 coding mode.
enum AVPixelFormat * pix_fmts
array of supported pixel formats, or NULL if unknown, array is terminated by -1
static av_always_inline int scaleforopp_x(VC1Context *v, int n)
enum AVPictureType pict_type
Picture type of the frame.
av_cold int ff_vc1_decode_end(AVCodecContext *avctx)
Close a VC1/WMV3 decoder.
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
av_cold void ff_vc1_init_transposed_scantables(VC1Context *v)
int next_p_frame_damaged
set if the next p frame is damaged, to avoid showing trashed b frames
uint8_t * blk_mv_type_base
av_cold void ff_intrax8_common_init(IntraX8Context *w, MpegEncContext *const s)
Initialize IntraX8 frame decoder.
int field_mode
1 for interlaced field pictures
av_cold void ff_intrax8_common_end(IntraX8Context *w)
Destroy IntraX8 frame structure.
static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n, int coded, int codingset, int mquant)
Decode intra block in intra frames - should be faster than decode_intra_block.
int width
picture width / height.
#define VC1_SUBBLKPAT_VLC_BITS
uint8_t * mbskip_table
used to avoid copy if macroblock skipped (for black regions for example) and used for b-frame encodin...
uint8_t mv_mode
Frame decoding info for all profiles.
#define FF_PROFILE_VC1_MAIN
int16_t(*[2] motion_val)[2]
Picture * current_picture_ptr
pointer to the current picture
#define FF_PROFILE_UNKNOWN
void ff_mpeg_er_frame_start(MpegEncContext *s)
static void vc1_decode_skip_blocks(VC1Context *v)
static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
av_cold int ff_vc1_decode_init_alloc_tables(VC1Context *v)
int ff_vc1_parse_frame_header(VC1Context *v, GetBitContext *gb)
static const int offset_table[6]
static int median4(int a, int b, int c, int d)
static int vc1_decode_p_mb_intfr(VC1Context *v)
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
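A minimal usage sketch; the helper name is hypothetical and the max_depth of 2 is chosen for illustration, not lifted from the decoder:

/* Sketch: read one code word from a prepared VLC table (requires
 * libavcodec's bit-reader headers). 'bits' must match the value the
 * table was built with; max_depth = 2 bounds the table lookups. */
static int read_one_code(GetBitContext *gb, VLC *vlc, int bits)
{
    return get_vlc2(gb, vlc->table, bits, 2);
}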
int block_last_index[12]
last non zero coefficient in block
static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x, int *dmv_y, int *pred_flag)
static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
h264_chroma_mc_func avg_h264_chroma_pixels_tab[3]
const uint16_t ff_vc1_field_mvpred_scales[2][7][4]
#define FF_PROFILE_VC1_SIMPLE
int16_t(* luma_mv_base)[2]
int block_index[6]
index to current MB in block based arrays with edges
VLC * cbpcy_vlc
CBPCY VLC table.
static int decode210(GetBitContext *gb)
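A sketch of the implied bit coding, assuming the conventional 210 scheme ('1' decodes to 0, '01' to 1, '00' to 2); requires libavcodec's bit-reader headers:

static int decode210_sketch(GetBitContext *gb)
{
    if (get_bits1(gb))           /* '1'  -> 0 */
        return 0;
    return 2 - get_bits1(gb);    /* '01' -> 1, '00' -> 2 */
}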
static const float pred[4]
uint8_t * sr_rows[2][2]
Sprite resizer line cache.
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
static const int8_t mv[256][2]
h264_chroma_mc_func avg_no_rnd_vc1_chroma_pixels_tab[3]
static void vc1_loop_filter_iblk(VC1Context *v, int pq)
static void vc1_interp_mc(VC1Context *v)
Motion compensation for direct or interpolated blocks in B-frames.
int first_slice_line
used in mpeg4 too to handle resync markers
static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
static const int offset_table1[9]
#define AV_LOG_INFO
Standard information.
int res_sprite
Simple/Main Profile sequence header.
void(* vc1_h_loop_filter4)(uint8_t *src, int stride, int pq)
int top_blk_sh
Either 3 or 0, positions of l/t in blk[].
Libavcodec external API header.
ptrdiff_t linesize
line size, in bytes, may be different from width
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
const int8_t ff_vc1_adv_interlaced_8x8_zz[64]
static const uint8_t vc1_delta_run_table[AC_MODES][57]
static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
Do motion compensation for 4-MV macroblock - luminance block.
main external API structure.
static void close(AVCodecParserContext *s)
static int vc1_decode_p_mb_intfi(VC1Context *v)
uint8_t * coded_block
used for coded block pattern prediction (msmpeg4v3, wmv1)
int height
picture size. must be a multiple of 16
static void vc1_decode_b_mb_intfi(VC1Context *v)
Decode one B-frame MB (in interlaced field B picture)
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
static void init_block_index(VC1Context *v)
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
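The two halfpel averaging rules in this listing differ only in rounding bias; a per-sample sketch (the real tables operate on whole pixel rows):

#include <stdint.h>

/* Sketch: rounded vs. unrounded halfpel averaging of two samples. */
static inline uint8_t avg_round(uint8_t a, uint8_t b)    { return (a + b + 1) >> 1; }
static inline uint8_t avg_no_round(uint8_t a, uint8_t b) { return (a + b) >> 1; }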
void(* sprite_v_double_noscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src2a, int alpha, int width)
const uint8_t ff_vc1_mbmode_intfrp[2][15][4]
static unsigned int get_bits1(GetBitContext *s)
int fmb_is_raw
forward mb plane is raw
static void skip_bits(GetBitContext *s, int n)
#define MB_INTRA_VLC_BITS
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
op_pixels_func put_no_rnd_pixels_tab[2][4]
Halfpel motion compensation with no rounding (a+b)>>1.
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
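A usage sketch combining the bit-reader calls listed here; the field layout is invented for illustration (requires libavcodec's bit-reader headers):

static void bitreader_sketch(const uint8_t *buf, int buf_size)
{
    GetBitContext gb;
    init_get_bits(&gb, buf, buf_size * 8);   /* size is given in bits */
    int flag  = get_bits1(&gb);              /* a single flag bit     */
    int field = get_bits_long(&gb, 24);      /* up to 32 bits at once */
    (void)flag; (void)field;
}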
#define VC1_2MV_BLOCK_PATTERN_VLC_BITS
static int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n, int a_avail, int c_avail, int16_t **dc_val_ptr, int *dir_ptr)
Get predicted DC value; prediction dir: left=0, top=1.
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
int skip_is_raw
skip mb plane is not coded
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
Generic function called after decoding the header and before a frame is decoded.
int ff_intrax8_decode_picture(IntraX8Context *const w, int dquant, int quant_offset)
Decode single IntraX8 frame.
#define FF_PROFILE_VC1_COMPLEX
uint8_t next_lutuv[2][256]
lookup tables used for intensity compensation
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
int ref_field_type[2]
forward and backward reference field type (top or bottom)
uint8_t * direct_mb_plane
bitplane for "direct" MBs
static const uint8_t vc1_last_delta_run_table[AC_MODES][10]
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer. ...
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
static int vc1_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Decode a VC1/WMV3 frame.
uint8_t * mv_type_mb_plane
bitplane for mv_type == (4MV)
int numref
number of past field pictures used as reference
const int32_t ff_vc1_dqscale[63]
h264_chroma_mc_func put_no_rnd_vc1_chroma_pixels_tab[3]
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
qpel_mc_func(* qpel_avg)[16]
int mv[2][4][2]
motion vectors for a macroblock; first coordinate: 0 = forward, 1 = backward; second coordinate: depend...
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
in the bitstream is reported as 10b
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
struct AVCodecContext * avctx
static const int offset_table2[9]
discard all non reference
static void vc1_decode_i_blocks(VC1Context *v)
Decode blocks of I-frame.
int pqindex
raw pqindex used in coding set selection
static const uint8_t vc1_last_delta_level_table[AC_MODES][44]
#define VC1_1REF_MVDATA_VLC_BITS
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
common internal api header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
#define CODEC_FLAG_GRAY
Only decode/encode grayscale.
void * av_realloc(void *ptr, size_t size)
Allocate or reallocate a block of memory.
static enum AVPixelFormat vc1_hwaccel_pixfmt_list_420[]
#define VC1_TTMB_VLC_BITS
static int get_unary(GetBitContext *gb, int stop, int len)
Get unary code of limited length.
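Equivalent behaviour as a sketch: count bits until the stop bit is seen or len bits have been consumed (requires libavcodec's bit-reader headers):

static int get_unary_sketch(GetBitContext *gb, int stop, int len)
{
    int i;
    for (i = 0; i < len && get_bits1(gb) != stop; i++)
        ;
    return i;
}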
static av_cold void flush(AVCodecContext *avctx)
Flush (reset) the frame ID after seeking.
static const int size_table[6]
enum FrameCodingMode fcm
Frame decoding info for Advanced profile.
Picture last_picture
copy of the previous picture structure.
uint8_t dquantfrm
pquant parameters
uint8_t next_luty[2][256]
Picture * last_picture_ptr
pointer to the previous picture.
int res_fasttx
reserved, always 1
enum AVDiscard skip_loop_filter
int * ttblk
Transform type at the block level.
static av_cold int init(AVCodecParserContext *s)
VLC ff_vc1_ac_coeff_table[8]
void(* vc1_v_s_overlap)(int16_t *top, int16_t *bottom)
int ff_vc1_decode_entry_point(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb)
#define VC1_INTFR_4MV_MBMODE_VLC_BITS
#define FF_PROFILE_VC1_ADVANCED
uint8_t pquantizer
Uniform (over sequence) quantizer in use.
h264_chroma_mc_func put_h264_chroma_pixels_tab[3]
static void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t *is_intra, int pred_flag, int dir)
Predict and set motion vector.
AVFrame * sprite_output_frame
void ff_mpv_frame_end(MpegEncContext *s)
void(* sprite_v_single)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset, int width)
const int8_t ff_vc1_adv_interlaced_8x4_zz[32]
int16_t(* block)[64]
points to one of the following blocks
op_pixels_func avg_no_rnd_pixels_tab[4]
Halfpel motion compensation with no rounding (a+b)>>1.
uint8_t(* curr_luty)[256]
static int decode012(GetBitContext *gb)
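A sketch of the usual 012 coding ('0' decodes to 0, '10' to 1, '11' to 2); requires libavcodec's bit-reader headers:

static int decode012_sketch(GetBitContext *gb)
{
    if (!get_bits1(gb))          /* '0'  -> 0 */
        return 0;
    return 1 + get_bits1(gb);    /* '10' -> 1, '11' -> 2 */
}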
VLC_TYPE(* table)[2]
code, bits
Picture next_picture
copy of the next picture structure.
int key_frame
1 -> keyframe, 0-> not
static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
Do motion compensation for 4-MV macroblock - both chroma blocks.
static void vc1_decode_i_blocks_adv(VC1Context *v)
Decode blocks of I-frame for advanced profile.
H264ChromaContext h264chroma
static av_always_inline int vc1_unescape_buffer(const uint8_t *src, int size, uint8_t *dst)
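A simplified sketch of the start-code emulation removal this performs: a 0x03 byte that follows two zero bytes and precedes a byte <= 0x03 is an escape byte and is dropped; the real routine's bounds handling is omitted here.

#include <stdint.h>

static int unescape_sketch(const uint8_t *src, int size, uint8_t *dst)
{
    int zeros = 0, dsize = 0, i;
    for (i = 0; i < size; i++) {
        if (zeros >= 2 && src[i] == 0x03 && i + 1 < size && src[i + 1] <= 0x03) {
            zeros = 0;           /* emulation-prevention byte: skip it */
            continue;
        }
        zeros = src[i] ? 0 : zeros + 1;
        dst[dsize++] = src[i];
    }
    return dsize;                /* number of bytes written to dst */
}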
int level
Advanced Profile.
int flags
AVCodecContext.flags (HQ, MV4, ...)
int brfd
reference frame distance (forward or backward)
uint32_t * mb_type
types and macros are defined in mpegutils.h
uint8_t mv_mode2
Secondary MV coding mode (B frames)
int new_sprite
Frame decoding info for sprite modes.
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
#define FFSWAP(type, a, b)
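The conventional definition, shown here as a sketch:

#define FFSWAP(type, a, b) do { type SWAP_tmp = b; b = a; a = SWAP_tmp; } while (0)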
void(* vc1_h_s_overlap)(int16_t *left, int16_t *right)
int codingset
index of current table set from 11.8 to use for luma block decoding
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
#define VC1_INTFR_NON4MV_MBMODE_VLC_BITS
AVPixelFormat
Pixel format.
This structure stores compressed data.
void(* vc1_v_overlap)(uint8_t *src, int stride)
void(* emulated_edge_mc)(uint8_t *buf, const uint8_t *src, ptrdiff_t buf_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
VLC ff_msmp4_dc_luma_vlc[2]
VLC ff_vc1_subblkpat_vlc[3]
uint8_t halfpq
Uniform quant over image and qp+.5.
static void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
static const uint8_t vc1_delta_level_table[AC_MODES][31]
uint8_t(* curr_lutuv)[256]
static av_always_inline const uint8_t * find_next_marker(const uint8_t *src, const uint8_t *end)
Find VC-1 marker in buffer.
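A sketch of the scan: shift bytes into a 32-bit window until it matches the 0x000001xx start-code pattern, then return the position of the first prefix byte; end is returned when no marker is found.

#include <stdint.h>

static const uint8_t *find_marker_sketch(const uint8_t *src, const uint8_t *end)
{
    uint32_t window = 0xFFFFFFFF;
    if (end - src < 4)
        return end;
    while (src < end) {
        window = (window << 8) | *src++;
        if ((window & ~0xFF) == 0x00000100)   /* 00 00 01 xx */
            return src - 4;
    }
    return end;
}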
void(* vc1_inv_trans_4x8_dc)(uint8_t *dest, int line_size, int16_t *block)
VLC ff_msmp4_dc_chroma_vlc[2]
op_pixels_func put_vc1_mspel_pixels_tab[16]
void(* sprite_h)(uint8_t *dst, const uint8_t *src, int offset, int advance, int count)
void(* vc1_h_loop_filter16)(uint8_t *src, int stride, int pq)