/*
 * Copyright (c) 2007 Bobby Bingham
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * scale video filter with integrated YADIF deinterlacing
 */

#include <assert.h>

#include "avfilter.h"
#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/pixdesc.h"
#include "libswscale/swscale.h"
#include "libavutil/cpu.h"
#include "libavutil/imgutils.h"
#include "avcodec.h"
#include "yadif.h"

/* debug statements are compiled out; redefine as "#define DBUG(x) x" to enable them */
#define DBUG(x)

static const char *var_names[] = {
    "PI",
    "PHI",
    "E",
    "in_w",   "iw",
    "in_h",   "ih",
    "out_w",  "ow",
    "out_h",  "oh",
    "a",
    "hsub",
    "vsub",
    NULL
};

enum var_name {
    VAR_PI,
    VAR_PHI,
    VAR_E,
    VAR_IN_W,   VAR_IW,
    VAR_IN_H,   VAR_IH,
    VAR_OUT_W,  VAR_OW,
    VAR_OUT_H,  VAR_OH,
    VAR_A,
    VAR_HSUB,
    VAR_VSUB,
    VARS_NB
};

typedef struct {
    /**
     * 0: send 1 frame for each frame
     * 1: send 1 frame for each field
     * 2: like 0 but skips spatial interlacing check
     * 3: like 1 but skips spatial interlacing check
     */
    int mode;

    /**
     *  0: bottom field first
     *  1: top field first
     * -1: auto-detection
     */
    int parity;

    int frame_pending;
    int flush;

    AVFilterBufferRef *cur;
    AVFilterBufferRef *next;
    AVFilterBufferRef *prev;
    AVFilterBufferRef *out;
    void (*filter_line)(uint8_t *dst,
                        uint8_t *prev, uint8_t *cur, uint8_t *next,
                        int w, int prefs, int mrefs, int parity, int mode);

    const AVPixFmtDescriptor *csp;
} YADIFContext;
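
/*
 * Illustration (not part of the original source): the three-frame window the
 * yadif core operates on.  start_frame() below shifts incoming picrefs
 * through prev/cur/next, so deinterlacing of frame N can only start once
 * frame N+1 has arrived:
 *
 *   input:  ... N-1    N    N+1 ...
 *                 |    |     |
 *               prev  cur  next   =  deinterlaced output for frame N
 */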
typedef struct {
    struct SwsContext *sws;     ///< software scaler context

    /**
     * New dimensions. Special values are:
     *   0 = original width/height
     *  -1 = keep original aspect
     */
    int w, h;

    unsigned int flags;         ///< sws flags

    int hsub, vsub;             ///< chroma subsampling
    int input_is_pal;           ///< set to 1 if the input format is paletted
    int interlaced;

    char w_expr[256];           ///< width expression string
    char h_expr[256];           ///< height expression string

    YADIFContext yadif;
    DBUG(int count;)            // output frame counter, debug builds only
} ScaleContext;

#define CHECK(j)\
    {   int score = FFABS(cur[mrefs-1+(j)] - cur[prefs-1-(j)])\
                  + FFABS(cur[mrefs  +(j)] - cur[prefs  -(j)])\
                  + FFABS(cur[mrefs+1+(j)] - cur[prefs+1-(j)]);\
        if (score < spatial_score) {\
            spatial_score= score;\
            spatial_pred= (cur[mrefs  +(j)] + cur[prefs  -(j)])>>1;

#define FILTER \
    for (x = 0; x < w; x++) { \
        int c = cur[mrefs]; \
        int d = (prev2[0] + next2[0])>>1; \
        int e = cur[prefs]; \
        int temporal_diff0 = FFABS(prev2[0] - next2[0]); \
        int temporal_diff1 = (FFABS(prev[mrefs] - c) + FFABS(prev[prefs] - e))>>1; \
        int temporal_diff2 = (FFABS(next[mrefs] - c) + FFABS(next[prefs] - e))>>1; \
        int diff = FFMAX3(temporal_diff0>>1, temporal_diff1, temporal_diff2); \
        int spatial_pred = (c+e)>>1; \
        int spatial_score = FFABS(cur[mrefs-1] - cur[prefs-1]) + FFABS(c-e) \
                          + FFABS(cur[mrefs+1] - cur[prefs+1]) - 1; \
 \
        CHECK(-1) CHECK(-2) }} }} \
        CHECK( 1) CHECK( 2) }} }} \
 \
        if (mode < 2) { \
            int b = (prev2[2*mrefs] + next2[2*mrefs])>>1; \
            int f = (prev2[2*prefs] + next2[2*prefs])>>1; \
            int max = FFMAX3(d-e, d-c, FFMIN(b-c, f-e)); \
            int min = FFMIN3(d-e, d-c, FFMAX(b-c, f-e)); \
 \
            diff = FFMAX3(diff, min, -max); \
        } \
 \
        if (spatial_pred > d + diff) \
            spatial_pred = d + diff; \
        else if (spatial_pred < d - diff) \
            spatial_pred = d - diff; \
 \
        dst[0] = spatial_pred; \
 \
        dst++; \
        cur++; \
        prev++; \
        next++; \
        prev2++; \
        next2++; \
    }

static void filter_line_c(uint8_t *dst,
                          uint8_t *prev, uint8_t *cur, uint8_t *next,
                          int w, int prefs, int mrefs, int parity, int mode)
{
    int x;
    uint8_t *prev2 = parity ? prev : cur ;
    uint8_t *next2 = parity ? cur  : next;
    FILTER
}

static void filter_line_c_16bit(uint16_t *dst,
                                uint16_t *prev, uint16_t *cur, uint16_t *next,
                                int w, int prefs, int mrefs, int parity, int mode)
{
    int x;
    uint16_t *prev2 = parity ? prev : cur ;
    uint16_t *next2 = parity ? cur  : next;
    mrefs /= 2;
    prefs /= 2;
    FILTER
}

static void filter(AVFilterContext *ctx, AVFilterBufferRef *dstpic,
                   int parity, int tff)
{
    ScaleContext *scale = ctx->priv;
    YADIFContext *yadif = &(scale->yadif);
    int y, i;

    for (int plane = 0; plane < yadif->csp->nb_components; plane++) {
        for (i = 0; i < yadif->csp->nb_components; i++) {
            if (yadif->csp->comp[i].plane == plane) {
                int wb = dstpic->video->w * (yadif->csp->comp[i].step_minus1 + 1);
                int h  = dstpic->video->h;
                int linesize_in  = yadif->cur->linesize[i];
                int linesize_out = dstpic->linesize[i];
                int df = (yadif->csp->comp[i].depth_minus1 + 1) / 8;

                DBUG(av_log(ctx, AV_LOG_DEBUG, "   yadif comp=%d\n", i));
                if (i) {
                    wb >>= scale->hsub;
                    h  >>= scale->vsub;
                }

                uint8_t *prev = &yadif->prev->data[i][0];
                uint8_t *cur  = &yadif->cur ->data[i][0];
                uint8_t *next = &yadif->next->data[i][0];
                uint8_t *dst  = &dstpic->data[i][0];

                y = 0;
                if (parity == 0) {
                    memcpy(dst, cur, wb);
                    y = 1;
                    prev += linesize_in;
                    cur  += linesize_in;
                    next += linesize_in;
                    dst  += linesize_out;
                }
                while (y < h) {
                    int mode = y == 1 || y + 2 == h ? 2 : yadif->mode;
                    yadif->filter_line(dst, prev, cur, next, wb,
                                       y + 1 < h ?  linesize_in : -linesize_in,
                                       y         ? -linesize_in :  linesize_in,
                                       parity ^ tff, mode);
                    y++;
                    prev += linesize_in;
                    cur  += linesize_in;
                    next += linesize_in;
                    dst  += linesize_out;
                    /* the following line of opposite parity is passed through unchanged */
                    if (y < h) {
                        memcpy(dst, cur, wb);
                        y++;
                        prev += linesize_in;
                        cur  += linesize_in;
                        next += linesize_in;
                        dst  += linesize_out;
                    }
                }
            }
        }
    }
}
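
/*
 * Worked example of the FILTER kernel above (values invented for
 * illustration): with the pixel above c = 100, the pixel below e = 120 and
 * the temporal average d = 160, the spatial prediction starts at
 * (c+e)>>1 = 110.  If the temporal activity gives diff = 10, the prediction
 * is clamped into [d-diff, d+diff] = [150, 170], so 110 is raised to 150:
 * where little motion is detected, the temporal estimate dominates the
 * spatial one.  The CHECK(-2..2) probes additionally search diagonal edges
 * for a better spatial pair before the clamp is applied.
 */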
static AVFilterBufferRef *yadif_get_video_buffer(AVFilterLink *link,
                                                 int perms, int w, int h)
{
    AVFilterBufferRef *picref;
    int width  = FFALIGN(w,     32);
    int height = FFALIGN(h + 2, 32);
    int i;

    DBUG(av_log(link->src, AV_LOG_DEBUG, "SCALE: yadif_get_video_buffer.\n"));
    picref = avfilter_default_get_video_buffer(link, perms, width, height);

    picref->video->w = w;
    picref->video->h = h;

    for (i = 0; i < 3; i++)
        picref->data[i] += picref->linesize[i];

    return picref;
}

static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
{
    ScaleContext *scale = ctx->priv;
    YADIFContext *yadif = &(scale->yadif);
    const char *p;
    av_unused int cpu_flags = av_get_cpu_flags();

    DBUG(av_log(ctx, AV_LOG_DEBUG, "SCALE: init,\n"));

    av_strlcpy(scale->w_expr, "iw", sizeof(scale->w_expr));
    av_strlcpy(scale->h_expr, "ih", sizeof(scale->h_expr));

    scale->flags = SWS_BILINEAR;
    if (args) {
        sscanf(args, "%255[^:]:%255[^:]:%d",
               scale->w_expr, scale->h_expr, &scale->interlaced);
        //sscanf(args, "%d:%d:%d", &scale->w, &scale->h, &scale->interlaced);
        p = strstr(args, "flags=");
        if (p)
            scale->flags = strtoul(p + 6, NULL, 0);
    }

    // YADIF part
    yadif->mode   = 1;  // must be 1 or 3 (one output frame per field)
    yadif->parity = -1;

    // scale->interlaced: -1=auto, 0=prog, 1=interl, 2=interl field swap
    switch (scale->interlaced) {
    case 0: // force progressive frames -> no deinterlacing
        break;
    case 1: // force interlaced, keep field order
        yadif->parity = 0;  // YADIF TFF -> interleave TFF
        break;
    case 2: // force interlaced, swap field order
        yadif->parity = 1;  // YADIF BFF -> interleave TFF
        break;
    }

    yadif->filter_line = filter_line_c;
    if (HAVE_SSSE3 && cpu_flags & AV_CPU_FLAG_SSSE3)
        yadif->filter_line = ff_yadif_filter_line_ssse3;
    else if (HAVE_SSE && cpu_flags & AV_CPU_FLAG_SSE2)
        yadif->filter_line = ff_yadif_filter_line_sse2;
    else if (HAVE_MMX && cpu_flags & AV_CPU_FLAG_MMX)
        yadif->filter_line = ff_yadif_filter_line_mmx;

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    ScaleContext *scale = ctx->priv;
    YADIFContext *yadif = &(scale->yadif);

    sws_freeContext(scale->sws);
    scale->sws = NULL;

    // YADIF part
    if (yadif->prev) avfilter_unref_buffer(yadif->prev);
    if (yadif->cur ) avfilter_unref_buffer(yadif->cur );
    if (yadif->next) avfilter_unref_buffer(yadif->next);
}
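
/*
 * Hypothetical usage examples (assuming this file is built in place of the
 * stock scale filter): init() above parses the option string as
 * "w_expr:h_expr:interlaced", and sws flags may be appended as "flags=<num>":
 *
 *   scale=720:576:1            deinterlace (keep field order) and scale
 *   scale=iw/2:-1:0            progressive downscale, keep aspect ratio
 *   scale=1280:720:-1:flags=4  auto-detect interlacing, SWS_BICUBIC scaling
 */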
static int query_formats(AVFilterContext *ctx)
{
    static const enum PixelFormat pix_fmts[] = {
        PIX_FMT_YUV420P,
        PIX_FMT_YUV422P,
        PIX_FMT_YUV444P,
        PIX_FMT_YUV410P,
        PIX_FMT_YUV411P,
        PIX_FMT_GRAY8,
        PIX_FMT_YUVJ420P,
        PIX_FMT_YUVJ422P,
        PIX_FMT_YUVJ444P,
        AV_NE( PIX_FMT_GRAY16BE, PIX_FMT_GRAY16LE ),
        PIX_FMT_YUV440P,
        PIX_FMT_YUVJ440P,
        AV_NE( PIX_FMT_YUV420P16BE, PIX_FMT_YUV420P16LE ),
        AV_NE( PIX_FMT_YUV422P16BE, PIX_FMT_YUV422P16LE ),
        AV_NE( PIX_FMT_YUV444P16BE, PIX_FMT_YUV444P16LE ),
        PIX_FMT_YUVA420P,   // 4 comp, 4 planes
        PIX_FMT_YUYV422,    // 3 comp, 1 plane
        PIX_FMT_RGB24,      // 3 comp, 1 plane
        PIX_FMT_BGR24,      // 3 comp, 1 plane
        PIX_FMT_PAL8,       // 1 comp, 1 plane
        PIX_FMT_UYVY422,    // 3 comp, 1 plane
        PIX_FMT_UYYVYY411,  // 3 comp, 1 plane
        PIX_FMT_NV12,       // 3 comp, 2 planes
        PIX_FMT_NV21,       // 3 comp, 2 planes
        PIX_FMT_ARGB,       // 4 comp, 1 plane
        PIX_FMT_RGBA,       // 4 comp, 1 plane
        PIX_FMT_ABGR,       // 4 comp, 1 plane
        PIX_FMT_BGRA,       // 4 comp, 1 plane
        PIX_FMT_RGB48BE,    // 3 comp, 1 plane, 16 bit
        PIX_FMT_RGB48LE,    // 3 comp, 1 plane, 16 bit
        PIX_FMT_BGR48BE,    // 3 comp, 1 plane, 16 bit
        PIX_FMT_BGR48LE,    // 3 comp, 1 plane, 16 bit
        PIX_FMT_Y400A,      // 2 comp, 1 plane
        PIX_FMT_NONE
    };

    ScaleContext *scale = ctx->priv;
    AVFilterFormats *formats;
    enum PixelFormat pix_fmt;
    int ret;

    if (scale->interlaced) {
        formats = avfilter_make_format_list(pix_fmts);
        avfilter_formats_ref(formats, &ctx->inputs[0]->out_formats);
        formats = avfilter_make_format_list(pix_fmts);
        avfilter_formats_ref(formats, &ctx->outputs[0]->in_formats);
    } else {
        if (ctx->inputs[0]) {
            formats = NULL;
            for (pix_fmt = 0; pix_fmt < PIX_FMT_NB; pix_fmt++)
                if (sws_isSupportedInput(pix_fmt) &&
                    (ret = avfilter_add_format(&formats, pix_fmt)) < 0) {
                    avfilter_formats_unref(&formats);
                    return ret;
                }
            avfilter_formats_ref(formats, &ctx->inputs[0]->out_formats);
        }
        if (ctx->outputs[0]) {
            formats = NULL;
            for (pix_fmt = 0; pix_fmt < PIX_FMT_NB; pix_fmt++)
                if (sws_isSupportedOutput(pix_fmt) &&
                    (ret = avfilter_add_format(&formats, pix_fmt)) < 0) {
                    avfilter_formats_unref(&formats);
                    return ret;
                }
            avfilter_formats_ref(formats, &ctx->outputs[0]->in_formats);
        }
    }

    return 0;
}

static int config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = outlink->src->inputs[0];
    ScaleContext *scale = ctx->priv;
    int64_t w, h;
    double var_values[VARS_NB], res;
    char *expr;
    int ret;

    var_values[VAR_PI]    = M_PI;
    var_values[VAR_PHI]   = M_PHI;
    var_values[VAR_E]     = M_E;
    var_values[VAR_IN_W]  = var_values[VAR_IW] = inlink->w;
    var_values[VAR_IN_H]  = var_values[VAR_IH] = inlink->h;
    var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN;
    var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN;
    var_values[VAR_A]     = (float) inlink->w / inlink->h;
    var_values[VAR_HSUB]  = 1<<av_pix_fmt_descriptors[inlink->format].log2_chroma_w;
    var_values[VAR_VSUB]  = 1<<av_pix_fmt_descriptors[inlink->format].log2_chroma_h;

    /* evaluate width and height */
    if ((ret = av_expr_parse_and_eval(&res, (expr = scale->w_expr),
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto fail;
    scale->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
    if ((ret = av_expr_parse_and_eval(&res, (expr = scale->h_expr),
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto fail;
    scale->h = var_values[VAR_OUT_H] = var_values[VAR_OH] = res;
    /* evaluate the width again, as it may depend on the output height */
    if ((ret = av_expr_parse_and_eval(&res, (expr = scale->w_expr),
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto fail;
    scale->w = res;

    w = scale->w;
    h = scale->h;

    /* sanity check params */
    if (w < -1 || h < -1) {
        av_log(ctx, AV_LOG_ERROR, "Size values less than -1 are not acceptable.\n");
        return AVERROR(EINVAL);
    }
    if (w == -1 && h == -1)
        scale->w = scale->h = 0;

    if (!(w = scale->w))
        w = inlink->w;
    if (!(h = scale->h))
        h = inlink->h;
    if (w == -1)
        w = av_rescale(h, inlink->w, inlink->h);
    if (h == -1)
        h = av_rescale(w, inlink->h, inlink->w);

    if (w > INT_MAX || h > INT_MAX ||
        (h * inlink->w) > INT_MAX  ||
        (w * inlink->h) > INT_MAX)
        av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n");

    outlink->w = w;
    outlink->h = h;

    /* TODO: make algorithm configurable */
    av_log(ctx, AV_LOG_INFO, "w:%d h:%d fmt:%s -> w:%d h:%d fmt:%s flags:0x%0x yadif:%d\n",
           inlink ->w, inlink ->h, av_pix_fmt_descriptors[ inlink->format].name,
           outlink->w, outlink->h, av_pix_fmt_descriptors[outlink->format].name,
           scale->flags, scale->interlaced);

    scale->input_is_pal = av_pix_fmt_descriptors[inlink->format].flags & PIX_FMT_PAL;

    /* if the height is unchanged, fall back to the plain scaling path */
    if (inlink->h == outlink->h)
        scale->interlaced = 0;

    if (scale->sws)
        sws_freeContext(scale->sws);
    scale->sws = sws_getContext(inlink ->w, inlink ->h, inlink ->format,
                                outlink->w, outlink->h, outlink->format,
                                scale->flags, NULL, NULL, NULL);
    if (!scale->sws)
        return AVERROR(EINVAL);

    return 0;

fail:
    av_log(NULL, AV_LOG_ERROR,
           "Error when evaluating the expression '%s'\n", expr);
    return ret;
}
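
/*
 * Example of the two-pass width evaluation above (numbers invented): for a
 * 1920x1080 input with w_expr="oh*a" and h_expr="720", the first width pass
 * yields NAN because "oh" is still unknown; the height pass then sets
 * oh=720, and the second width pass gives 720*(1920/1080) = 1280.
 */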
static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
{
    ScaleContext *scale = link->dst->priv;
    YADIFContext *yadif = &(scale->yadif);
    AVFilterLink *outlink = link->dst->outputs[0];
    AVFilterBufferRef *outpicref;

    DBUG(av_log(link->dst, AV_LOG_DEBUG, "SCALE: start_frame: enter,\n"));

    if (scale->interlaced) {
        // YADIF part
        if (yadif->prev)
            avfilter_unref_buffer(yadif->prev);
        yadif->prev = yadif->cur;
        yadif->cur  = yadif->next;
        yadif->next = picref;

        if (yadif->cur == 0)
            return;

        // if cur (and next) are present but prev is not yet, let prev reference cur
        if (yadif->prev == 0)
            yadif->prev = avfilter_ref_buffer(yadif->cur, AV_PERM_READ);

        yadif->csp = &av_pix_fmt_descriptors[link->format];
        if (yadif->csp->comp[0].depth_minus1 == 15)
            yadif->filter_line = filter_line_c_16bit;
    }

    // not quite correct, as this should refer to the YADIF output
    scale->hsub = av_pix_fmt_descriptors[link->format].log2_chroma_w;
    scale->vsub = av_pix_fmt_descriptors[link->format].log2_chroma_h;

    outpicref = avfilter_get_video_buffer(outlink, AV_PERM_WRITE,
                                          outlink->w, outlink->h);
    avfilter_copy_buffer_ref_props(outpicref, picref);
    outpicref->video->w = outlink->w;
    outpicref->video->h = outlink->h;

    outlink->out_buf = outpicref;

    av_reduce(&outpicref->video->sample_aspect_ratio.num,
              &outpicref->video->sample_aspect_ratio.den,
              (int64_t)picref->video->sample_aspect_ratio.num * outlink->h * link->w,
              (int64_t)picref->video->sample_aspect_ratio.den * outlink->w * link->h,
              INT_MAX);

    avfilter_start_frame(outlink, avfilter_ref_buffer(outpicref, ~0));
}

static void draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
{
}
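
/*
 * Example of the sample-aspect-ratio adjustment above (values invented):
 * scaling a 720x576 picture with SAR 64:45 (anamorphic 16:9 PAL) to
 * 1024x576 gives SAR = 64*576*720 : 45*1024*576, which reduces to 1:1,
 * i.e. square pixels, while the display aspect ratio stays 16:9.
 */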
static void end_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->dst;
    ScaleContext *scale = ctx->priv;
    YADIFContext *yadif = &(scale->yadif);
    int scaled_h;
    AVFilterBufferRef *cur_pic = link->cur_buf;  // by default the scaling source is the filter input

    DBUG(av_log(ctx, AV_LOG_DEBUG, "SCALE: end_frame: enter,\n"));
    DBUG(av_log(ctx, AV_LOG_DEBUG, "   cur_pic=0x%08x*%d: %dx%d@%lld,\n", cur_pic, cur_pic->buf->refcount, cur_pic->buf->w, cur_pic->buf->h, cur_pic->pts));

    // do YADIF
    DBUG(av_log(ctx, AV_LOG_DEBUG, "SCALE: link->w/h=%d/%d, \n", link->w, link->h));
    DBUG(av_log(ctx, AV_LOG_DEBUG, "SCALE: link->dst->inputs[0]->w/h=%d/%d, \n", link->dst->inputs[0]->w, link->dst->inputs[0]->h));
    DBUG(av_log(ctx, AV_LOG_DEBUG, "SCALE: link->dst->outputs[0]->w/h=%d/%d, \n", link->dst->outputs[0]->w, link->dst->outputs[0]->h));
    DBUG(av_log(ctx, AV_LOG_DEBUG, "SCALE: prev=0x%08x, cur=0x%08x, next=0x%08x,\n", yadif->prev, yadif->cur, yadif->next));
    if (yadif->prev) DBUG(av_log(ctx, AV_LOG_DEBUG, "SCALE: prev=0x%08x*%d: %dx%d@%lld,\n", yadif->prev, yadif->prev->buf->refcount, yadif->prev->buf->w, yadif->prev->buf->h, yadif->prev->pts));
    if (yadif->cur)  DBUG(av_log(ctx, AV_LOG_DEBUG, "SCALE: cur=0x%08x*%d: %dx%d@%lld,\n", yadif->cur, yadif->cur->buf->refcount, yadif->cur->buf->w, yadif->cur->buf->h, yadif->cur->pts));
    if (yadif->next) DBUG(av_log(ctx, AV_LOG_DEBUG, "SCALE: next=0x%08x*%d: %dx%d@%lld,\n", yadif->next, yadif->next->buf->refcount, yadif->next->buf->w, yadif->next->buf->h, yadif->next->pts));

    if (scale->interlaced && (yadif->prev == 0 || yadif->cur == 0 || yadif->next == 0))
        return;

    if (scale->interlaced > 0 ||
        (scale->interlaced == -1 && yadif->cur->video->interlaced == 1)) {
        int tff, is_second;
        AVFilterBufferRef *field;
        AVFilterBufferRef *out = link->dst->outputs[0]->out_buf;
        int out_w     = link->dst->outputs[0]->w;
        int out_h     = link->dst->outputs[0]->h;
        int out_hsub  = av_pix_fmt_descriptors[link->dst->outputs[0]->format].log2_chroma_w;
        int out_vsub  = av_pix_fmt_descriptors[link->dst->outputs[0]->format].log2_chroma_h;
        int out_comps = av_pix_fmt_descriptors[link->dst->outputs[0]->format].nb_components;

        DBUG(av_log(ctx, AV_LOG_DEBUG, "SCALE: yadif start,\n"));
        DBUG(av_log(ctx, AV_LOG_DEBUG, "   out->video->w/h=%dx%d %s,\n", out->video->w, out->video->h, av_pix_fmt_descriptors[out->format].name));

        // get a buffer of the size of the input picture
        yadif->out = yadif_get_video_buffer(ctx->inputs[0],
                                            AV_PERM_WRITE | AV_PERM_PRESERVE | AV_PERM_REUSE,
                                            link->w, link->h);
        DBUG(av_log(ctx, AV_LOG_DEBUG, "   yadif->out->video->w/h=%dx%d %s,\n", yadif->out->video->w, yadif->out->video->h, av_pix_fmt_descriptors[yadif->out->format].name));
        for (int i = 0; i < av_pix_fmt_descriptors[yadif->out->format].nb_components; i++) {
            DBUG(av_log(ctx, AV_LOG_DEBUG, "   yadif->out->linesize[%d]=%d,\n", i, yadif->out->linesize[i]));
        }
        //avfilter_copy_buffer_ref_props(yadif->out, yadif->cur);
        yadif->out->video->interlaced = 0;

        if (yadif->parity == -1) {
            tff = yadif->cur->video->interlaced ?
                  yadif->cur->video->top_field_first : 1;
        } else {
            tff = yadif->parity ^ 1;
        }

        if (scale->interlaced == 2)
            out->video->top_field_first = out->video->top_field_first ? 0 : 1;
        //out->video->interlaced = 1;

        // the scaling source is now the YADIF output
        cur_pic = yadif->out;

        // get a buffer of the size of the output picture (field buffer)
        field = avfilter_get_video_buffer(ctx->outputs[0], AV_PERM_WRITE,
                                          out_w, out_h);
        DBUG(av_log(ctx, AV_LOG_DEBUG, "   field->video->w/h=%dx%d %s,\n", field->video->w, field->video->h, av_pix_fmt_descriptors[field->format].name));
        for (int i = 0; i < av_pix_fmt_descriptors[field->format].nb_components; i++) {
            DBUG(av_log(ctx, AV_LOG_DEBUG, "   field->linesize[%d]=%d,\n", i, field->linesize[i]));
        }

        // field 1 (top field): YADIF into the frame buffer
        is_second = 0;
        DBUG(av_log(ctx, AV_LOG_DEBUG, "SCALE: yadif filter tff=%d,\n", tff));
        filter(ctx, yadif->out, tff ^ !is_second, tff);

        // scale field 1 from the frame buffer into the field buffer
        DBUG(av_log(ctx, AV_LOG_DEBUG, "SCALE: sws filter,\n"));
        scaled_h = sws_scale(scale->sws, cur_pic->data, cur_pic->linesize,
                             0, link->h, field->data, field->linesize);

        // field 2 (bottom field): YADIF into the frame buffer
        is_second = 1;
        DBUG(av_log(ctx, AV_LOG_DEBUG, "SCALE: yadif filter tff=%d,\n", tff));
        filter(ctx, yadif->out, tff ^ !is_second, tff);

        // scale field 2 from the frame buffer into the output
        DBUG(av_log(ctx, AV_LOG_DEBUG, "SCALE: sws filter,\n"));
        scaled_h = sws_scale(scale->sws, cur_pic->data, cur_pic->linesize,
                             0, link->h,
                             link->dst->outputs[0]->out_buf->data,
                             link->dst->outputs[0]->out_buf->linesize);

        // interleave the fields:
        // copy all even lines from the field buffer into the output
        DBUG(av_log(ctx, AV_LOG_DEBUG, "SCALE: interleave fields,\n"));
        av_image_copy_plane(out->data[0], out->linesize[0] * 2,
                            field->data[0], field->linesize[0] * 2,
                            out_w * (av_pix_fmt_descriptors[field->format].comp[0].step_minus1 + 1),
                            out_h >> 1);
        for (int plane = 1; plane < av_pix_fmt_descriptors[field->format].nb_components; plane++) {
            for (int comp = 0; comp < av_pix_fmt_descriptors[field->format].nb_components; comp++) {
                if (av_pix_fmt_descriptors[field->format].comp[comp].plane == plane) {
                    av_image_copy_plane(out->data[plane], out->linesize[plane] * 2,
                                        field->data[plane], field->linesize[plane] * 2,
                                        (out_w >> out_hsub) * (av_pix_fmt_descriptors[field->format].comp[comp].step_minus1 + 1),
                                        (out_h >> 1) >> out_vsub);
                    break;
                }
            }
        }

        avfilter_unref_buffer(field);
        avfilter_unref_buffer(yadif->out);
        DBUG(av_log(ctx, AV_LOG_DEBUG, "SCALE: yadif end,\n"));
    } else {
        scaled_h = sws_scale(scale->sws, cur_pic->data, cur_pic->linesize,
                             0, link->h,
                             link->dst->outputs[0]->out_buf->data,
                             link->dst->outputs[0]->out_buf->linesize);
    }

    DBUG(av_log(ctx, AV_LOG_DEBUG, "SCALE: end_frame: forward draw_slice,\n"));
    avfilter_draw_slice(link->dst->outputs[0], 0, scaled_h, 1);

    avfilter_unref_buffer(link->dst->outputs[0]->out_buf);
    link->dst->outputs[0]->out_buf = NULL;

    DBUG(av_log(ctx, AV_LOG_DEBUG, "SCALE: end_frame: forward end_frame,\n"));
    avfilter_end_frame(link->dst->outputs[0]);

    if (scale->interlaced == 0)
        avfilter_unref_buffer(cur_pic);

    DBUG(av_log(ctx, AV_LOG_DEBUG, "SCALE: end_frame(%d): exit.\n", ++(scale->count)));
}
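
/*
 * Summary of the interlaced path in end_frame() above: each incoming frame
 * is deinterlaced twice (once per field) at the input size, each result is
 * scaled independently to the output size, and the two scaled frames are
 * then re-interleaved line by line, so the output stays interlaced:
 *
 *   output line 0 (even)  =  scaled field-1 frame, line 0
 *   output line 1 (odd)   =  scaled field-2 frame, line 1 (left in place)
 *   output line 2 (even)  =  scaled field-1 frame, line 2
 *   ...
 */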
static int request_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->src;
    ScaleContext *scale = ctx->priv;
    YADIFContext *yadif = &(scale->yadif);
    int ret;

    DBUG(av_log(ctx, AV_LOG_DEBUG, "SCALE: request_frame: enter,\n"));

    if (yadif->flush) {
        if (!yadif->next)
            return -1;
        DBUG(av_log(ctx, AV_LOG_DEBUG, "   flush request_frame,\n"));
        start_frame(link->src->inputs[0],
                    avfilter_ref_buffer(yadif->next, AV_PERM_READ));
        end_frame(link->src->inputs[0]);
        return 0;
    }

    DBUG(av_log(ctx, AV_LOG_DEBUG, "   forward request_frame,\n"));
    if ((ret = avfilter_request_frame(link->src->inputs[0])))
        return ret;

    DBUG(av_log(ctx, AV_LOG_DEBUG, "   exit request_frame=0.\n"));
    return 0;
}

static int poll_frame(AVFilterLink *link, int flush)
{
    AVFilterContext *ctx = link->src;
    ScaleContext *scale = ctx->priv;
    YADIFContext *yadif = &(scale->yadif);
    int ret, val;

    DBUG(av_log(ctx, AV_LOG_DEBUG, "SCALE: poll_frame: enter,\n"));
    DBUG(av_log(ctx, AV_LOG_DEBUG, "   forward poll_frame,\n"));
    val = avfilter_poll_frame(link->src->inputs[0], flush);
    DBUG(av_log(ctx, AV_LOG_DEBUG, "   %d frames in buffer,\n", val));

    if (scale->interlaced) {
        if (val == 1 && !yadif->next) { //FIXME change API to not require this red tape
            DBUG(av_log(ctx, AV_LOG_DEBUG, "   pipe empty -> forward request_frame,\n"));
            if ((ret = avfilter_request_frame(link->src->inputs[0])) < 0) {
                DBUG(av_log(ctx, AV_LOG_DEBUG, "   request_frame error, exit.\n"));
                return ret;
            }
            DBUG(av_log(ctx, AV_LOG_DEBUG, "   2. forward poll_frame,\n"));
            val = avfilter_poll_frame(link->src->inputs[0], flush);
            DBUG(av_log(ctx, AV_LOG_DEBUG, "   %d frames ready,\n", val));
            DBUG(av_log(ctx, AV_LOG_DEBUG, "   assert,\n"));
            assert(yadif->next);
        }
        if (val == 0 && flush && yadif->flush == 0) {
            DBUG(av_log(ctx, AV_LOG_DEBUG, "   start flushing,\n"));
            yadif->flush = 1;
            val = 1;
        }
    }

    DBUG(av_log(ctx, AV_LOG_DEBUG, "   exit poll_frame=%d.\n", val));
    return val;
}

AVFilter avfilter_vf_scale = {
    .name          = "scale",
    .description   = NULL_IF_CONFIG_SMALL("Scale the input video to width:height size and/or convert the image format."),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ScaleContext),

    .inputs    = (AVFilterPad[]) {{ .name             = "default",
                                    .type             = AVMEDIA_TYPE_VIDEO,
                                    .start_frame      = start_frame,
                                    .end_frame        = end_frame,
                                    .get_video_buffer = yadif_get_video_buffer,
                                    .draw_slice       = draw_slice,
                                    .min_perms        = AV_PERM_READ, },
                                  { .name = NULL}},
    .outputs   = (AVFilterPad[]) {{ .name             = "default",
                                    .type             = AVMEDIA_TYPE_VIDEO,
                                    .config_props     = config_props,
                                    .poll_frame       = poll_frame,
                                    .request_frame    = request_frame, },
                                  { .name = NULL}},
};