author:    Philipp Zabel <p.zabel@pengutronix.de>              2020-03-18 19:35:33 +0100
committer: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>   2020-04-14 11:52:24 +0200
commit:    12310819a26aa567a9b80b675c2d4970ca797c0d
tree:      731659a1430cae04bd699e7ccf7454881e1460ad
parent:    50733b5b9102da5b2415e3497aa89588396e4f20
media: coda: jpeg: add CODA960 JPEG decoder support
This patch adds JPEG decoding support for CODA960, handling the JPEG hardware
directly. A separate JPEG decoder video device is created due to the separate
hardware unit and the different supported pixel formats. While the hardware
cannot change subsampling on the fly, it can decode 4:2:2 subsampled JPEG
images into YUV422P.

[m.felsch@pengutronix.de: fix qsequence counting by explicitly checking for the !use_bit case]
[hverkuil-cisco@xs4all.nl: fix 'warning: missing braces around initializer']
[mchehab+huawei@kernel.org: fix 'BRACES: Unbalanced braces around else statement']
Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
Signed-off-by: Marco Felsch <m.felsch@pengutronix.de>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
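For illustration only (not part of the patch): a minimal userspace sketch of how a V4L2 client might negotiate formats against the new decoder node. The device path and the JPEG buffer size are placeholder assumptions; as described above, the driver may adjust the CAPTURE pixel format once the chroma subsampling of the stream is known.

/* Hypothetical userspace sketch; /dev/video4 and the 1 MiB sizeimage are placeholders. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_format fmt;
	int fd = open("/dev/video4", O_RDWR);

	if (fd < 0)
		return 1;

	/* The OUTPUT queue carries the compressed JPEG bitstream. */
	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_JPEG;
	fmt.fmt.pix.sizeimage = 1024 * 1024;
	if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
		return 1;

	/* Ask for NV12 on CAPTURE; the driver may switch this to YUV422P
	 * for 4:2:2 subsampled JPEGs, since subsampling is fixed per stream. */
	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_NV12;
	if (ioctl(fd, VIDIOC_TRY_FMT, &fmt) < 0)
		return 1;

	printf("capture: %.4s %ux%u, %u bytes\n",
	       (const char *)&fmt.fmt.pix.pixelformat,
	       fmt.fmt.pix.width, fmt.fmt.pix.height, fmt.fmt.pix.sizeimage);
	return 0;
}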
Diffstat (limited to 'drivers/media/platform/coda/coda-common.c')
-rw-r--r--   drivers/media/platform/coda/coda-common.c   115
1 file changed, 108 insertions(+), 7 deletions(-)
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 46163e524c1e..096e8e11f836 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -159,6 +159,7 @@ static const struct coda_codec coda9_codecs[] = {
CODA_CODEC(CODA9_MODE_DECODE_H264, V4L2_PIX_FMT_H264, V4L2_PIX_FMT_YUV420, 1920, 1088),
CODA_CODEC(CODA9_MODE_DECODE_MP2, V4L2_PIX_FMT_MPEG2, V4L2_PIX_FMT_YUV420, 1920, 1088),
CODA_CODEC(CODA9_MODE_DECODE_MP4, V4L2_PIX_FMT_MPEG4, V4L2_PIX_FMT_YUV420, 1920, 1088),
+ CODA_CODEC(CODA9_MODE_DECODE_MJPG, V4L2_PIX_FMT_JPEG, V4L2_PIX_FMT_YUV420, 8192, 8192),
};
struct coda_video_device {
@@ -252,6 +253,22 @@ static const struct coda_video_device coda9_jpeg_encoder = {
},
};
+static const struct coda_video_device coda9_jpeg_decoder = {
+ .name = "coda-jpeg-decoder",
+ .type = CODA_INST_DECODER,
+ .ops = &coda9_jpeg_decode_ops,
+ .direct = true,
+ .src_formats = {
+ V4L2_PIX_FMT_JPEG,
+ },
+ .dst_formats = {
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_YUV420,
+ V4L2_PIX_FMT_YVU420,
+ V4L2_PIX_FMT_YUV422P,
+ },
+};
+
static const struct coda_video_device *codadx6_video_devices[] = {
&coda_bit_encoder,
};
@@ -270,6 +287,7 @@ static const struct coda_video_device *coda7_video_devices[] = {
static const struct coda_video_device *coda9_video_devices[] = {
&coda9_jpeg_encoder,
+ &coda9_jpeg_decoder,
&coda_bit_encoder,
&coda_bit_decoder,
};
@@ -411,6 +429,12 @@ static int coda_querycap(struct file *file, void *priv,
return 0;
}
+static const u32 coda_formats_420[CODA_MAX_FORMATS] = {
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_YUV420,
+ V4L2_PIX_FMT_YVU420,
+};
+
static int coda_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
@@ -421,10 +445,33 @@ static int coda_enum_fmt(struct file *file, void *priv,
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
formats = cvd->src_formats;
- else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ struct coda_q_data *q_data_src;
+ struct vb2_queue *src_vq;
+
formats = cvd->dst_formats;
- else
+
+ /*
+ * If the source format is already fixed, only allow the same
+ * chroma subsampling.
+ */
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ src_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (q_data_src->fourcc == V4L2_PIX_FMT_JPEG &&
+ vb2_is_streaming(src_vq)) {
+ if (ctx->params.jpeg_chroma_subsampling ==
+ V4L2_JPEG_CHROMA_SUBSAMPLING_420) {
+ formats = coda_formats_420;
+ } else if (ctx->params.jpeg_chroma_subsampling ==
+ V4L2_JPEG_CHROMA_SUBSAMPLING_422) {
+ f->pixelformat = V4L2_PIX_FMT_YUV422P;
+ return f->index ? -EINVAL : 0;
+ }
+ }
+ } else {
return -EINVAL;
+ }
if (f->index >= CODA_MAX_FORMATS || formats[f->index] == 0)
return -EINVAL;
@@ -614,12 +661,23 @@ static int coda_try_fmt_vid_cap(struct file *file, void *priv,
/*
* If the source format is already fixed, only allow the same output
- * resolution
+ * resolution. When decoding JPEG images, we also have to make sure to
+ * use the same chroma subsampling.
*/
src_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
if (vb2_is_streaming(src_vq)) {
f->fmt.pix.width = q_data_src->width;
f->fmt.pix.height = q_data_src->height;
+
+ if (q_data_src->fourcc == V4L2_PIX_FMT_JPEG) {
+ if (ctx->params.jpeg_chroma_subsampling ==
+ V4L2_JPEG_CHROMA_SUBSAMPLING_420 &&
+ f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUV422P)
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_NV12;
+ else if (ctx->params.jpeg_chroma_subsampling ==
+ V4L2_JPEG_CHROMA_SUBSAMPLING_422)
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV422P;
+ }
}
f->fmt.pix.colorspace = ctx->colorspace;
@@ -637,12 +695,18 @@ static int coda_try_fmt_vid_cap(struct file *file, void *priv,
if (ret < 0)
return ret;
- /* The decoders always write complete macroblocks */
+ /* The decoders always write complete macroblocks or MCUs */
if (ctx->inst_type == CODA_INST_DECODER) {
- f->fmt.pix.height = round_up(f->fmt.pix.height, 16);
f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16);
- f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
- f->fmt.pix.height * 3 / 2;
+ f->fmt.pix.height = round_up(f->fmt.pix.height, 16);
+ if (codec->src_fourcc == V4L2_PIX_FMT_JPEG &&
+ f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUV422P) {
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height * 2;
+ } else {
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height * 3 / 2;
+ }
ret = coda_try_fmt_vdoa(ctx, f, &use_vdoa);
if (ret < 0)
@@ -747,6 +811,7 @@ static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f,
/* else fall through */
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_YUV422P:
ctx->tiled_map_type = GDI_LINEAR_FRAME_MAP;
break;
default:
@@ -1894,6 +1959,42 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
}
}
+ /*
+ * Check the first input JPEG buffer to determine chroma
+ * subsampling.
+ */
+ if (q_data_src->fourcc == V4L2_PIX_FMT_JPEG) {
+ buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ ret = coda_jpeg_decode_header(ctx, &buf->vb2_buf);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev,
+ "failed to decode JPEG header: %d\n",
+ ret);
+ goto err;
+ }
+
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ q_data_dst->width = round_up(q_data_src->width, 16);
+ q_data_dst->height = round_up(q_data_src->height, 16);
+ q_data_dst->bytesperline = q_data_dst->width;
+ if (ctx->params.jpeg_chroma_subsampling ==
+ V4L2_JPEG_CHROMA_SUBSAMPLING_420) {
+ q_data_dst->sizeimage =
+ q_data_dst->bytesperline *
+ q_data_dst->height * 3 / 2;
+ if (q_data_dst->fourcc != V4L2_PIX_FMT_YUV420)
+ q_data_dst->fourcc = V4L2_PIX_FMT_NV12;
+ } else {
+ q_data_dst->sizeimage =
+ q_data_dst->bytesperline *
+ q_data_dst->height * 2;
+ q_data_dst->fourcc = V4L2_PIX_FMT_YUV422P;
+ }
+ q_data_dst->rect.left = 0;
+ q_data_dst->rect.top = 0;
+ q_data_dst->rect.width = q_data_src->width;
+ q_data_dst->rect.height = q_data_src->height;
+ }
ctx->streamon_out = 1;
} else {
ctx->streamon_cap = 1;
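The last hunk above derives the CAPTURE queue geometry from the first decoded JPEG header: width and height are rounded up to whole macroblocks/MCUs, and sizeimage follows from the detected chroma subsampling. Below is a standalone sketch of just that arithmetic (assumptions: luma stride equal to the 16-aligned width, 8-bit samples), mirroring but not taken from the driver code.

/* Standalone illustration of the capture sizing arithmetic; not driver code. */
#include <stdio.h>

#define ROUND_UP_16(x)	(((x) + 15u) & ~15u)

static unsigned int capture_sizeimage(unsigned int width, unsigned int height,
				      int subsampling_422)
{
	unsigned int bytesperline = ROUND_UP_16(width);   /* luma stride */
	unsigned int mcu_height = ROUND_UP_16(height);    /* whole MCU rows */

	/* YUV422P needs 2 bytes per pixel, NV12/YUV420 needs 1.5. */
	if (subsampling_422)
		return bytesperline * mcu_height * 2;
	return bytesperline * mcu_height * 3 / 2;
}

int main(void)
{
	/* e.g. a 1920x1080 4:2:2 JPEG decodes into a 1920x1088 YUV422P frame */
	printf("4:2:2: %u bytes\n", capture_sizeimage(1920, 1080, 1));
	printf("4:2:0: %u bytes\n", capture_sizeimage(1920, 1080, 0));
	return 0;
}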