Decodificar un video y volver a codificarlo con MediaCodec produce un archivo dañado

Estoy tratando de implementar https://android.googlesource.com/platform/cts/+/jb-mr2-release/tests/tests/media/src/android/media/cts/DecodeEditEncodeTest.java pero modificando la fuente para usar un archivo de video MP4. El tipo MIME es video/avc, la velocidad de bits es 288 kbps, el intervalo de I-frames es 100, el ancho es 176 y la altura 144. El tamaño del archivo es 6 MB. Cuando decodifico el video y coloco el fotograma en la superficie de salida, puedo guardarlo en un mapa de bits y verlo perfectamente. Pero al final, después de la codificación (con los mismos parámetros del video original), obtengo un archivo de 700 kB y no puedo reproducir el video (probablemente es un archivo dañado).

                // Open the source file and locate the first video track.
                extractor = new MediaExtractor();
                extractor.SetDataSource(filePath);
                for (int i = 0; i < extractor.TrackCount; i++)
                {
                    inputFormat = extractor.GetTrackFormat(i);
                    string mime = inputFormat.GetString(MediaFormat.KeyMime);
                    if (mime.StartsWith("video/"))
                    {
                        extractor.SelectTrack(i);
                        mimeType = mime;
                        break;
                    }
                }
                // NOTE(review): if the file has no video track, inputFormat/mimeType keep
                // whatever value they had before the loop and the reads below operate on
                // stale data — consider failing fast here.
                mWidth = inputFormat.GetInteger(MediaFormat.KeyWidth);
                mHeight = inputFormat.GetInteger(MediaFormat.KeyHeight);
               // Create an encoder format that matches the input format.  (Might be able to just
                 // re-use the format used to generate the video, since we want it to be the same.)
                MediaFormat outputFormat = MediaFormat.CreateVideoFormat(mimeType, mWidth, mHeight);
                // Surface input: the encoder takes frames from an input Surface instead of
                // ByteBuffers, so the color format must be COLOR_FormatSurface.
                outputFormat.SetInteger(MediaFormat.KeyColorFormat,
                        (int)MediaCodecCapabilities.Formatsurface);
                outputFormat.SetInteger(MediaFormat.KeyBitRate, 288000);
                // NOTE(review): GetInteger throws if the extracted track format lacks a
                // frame-rate entry — not all containers carry it; verify for your inputs.
                outputFormat.SetInteger(MediaFormat.KeyFrameRate,
                        inputFormat.GetInteger(MediaFormat.KeyFrameRate));
                outputFormat.SetInteger(MediaFormat.KeyIFrameInterval, 100);
                // NOTE(review): this stores the format built above, NOT the encoder's actual
                // output format. The real output format (with the csd-0/csd-1 codec-specific
                // data needed to build a playable MP4) is only available later, from the
                // INFO_OUTPUT_FORMAT_CHANGED event — a likely contributor to the broken file.
                outputData.setMediaFormat(outputFormat);
                encoder = MediaCodec.CreateEncoderByType(mimeType);
                encoder.Configure(outputFormat, null, null, MediaCodecConfigFlags.Encode);
                // CreateInputSurface must be called between Configure and Start; makeCurrent
                // binds the EGL context that OutputSurface below will share.
                inputSurface = new InputSurface(encoder.CreateInputSurface());
                inputSurface.makeCurrent();
                encoder.Start();
                // OutputSurface uses the EGL context created by InputSurface.
                decoder = MediaCodec.CreateDecoderByType(mimeType);
                outputSurface = new OutputSurface();
                outputSurface.changeFragmentShader(FRAGMENT_SHADER);
                // Decoder renders to the OutputSurface texture; frames are then drawn into
                // the encoder's input surface in the edit loop.
                decoder.Configure(inputFormat, outputSurface.getSurface(), null, 0);
                decoder.Start();
                editVideoData2(extractor, decoder, outputSurface, inputSurface, encoder, outputData);

y la parte de decodificación-codificación:

            // Main edit loop: feed compressed samples to the decoder, render each decoded
            // frame through OpenGL onto the encoder's input surface, and drain the encoder.
            // Mirrors the CTS DecodeEditEncodeTest structure.
            //
            // NOTE(review): the encoder output is written to fStream as a raw H.264
            // elementary stream — there is no MediaMuxer, so the resulting file is NOT a
            // valid MP4 container. This is the most likely reason the 700 kB output file
            // won't play. Wrap the output in a MediaMuxer (API 18+): add a track using the
            // format from INFO_OUTPUT_FORMAT_CHANGED, then WriteSampleData per buffer.
            while (!outputDone)
            {
                if (VERBOSE) Log.Debug(TAG, "edit loop");
                // Feed more data to the decoder.
                if (!inputDone)
                {

                    int inputBufIndex = decoder.DequeueInputBuffer(TIMEOUT_USEC);
                    if (inputBufIndex >= 0)
                    {
                        ByteBuffer buffer = decoderInputBuffers[inputBufIndex];
                        int sampleSize = extractor.ReadSampleData(buffer, 0);
                        if (sampleSize < 0)
                        {
                            inputChunk++;
                            // End of stream -- send empty frame with EOS flag set.
                            decoder.QueueInputBuffer(inputBufIndex, 0, 0, 0L,
                                    MediaCodecBufferFlags.EndOfStream);
                            inputDone = true;
                            if (VERBOSE) Log.Debug(TAG, "sent input EOS (with zero-length frame)");
                        }
                        else {
                            // Copy a chunk of input to the decoder.  The first chunk should have
                            // the BUFFER_FLAG_CODEC_CONFIG flag set.
                            // NOTE(review): Clear() is called AFTER ReadSampleData filled the
                            // buffer; it resets position/limit but QueueInputBuffer uses the
                            // explicit offset/size, so this is harmless — just misleading.
                            buffer.Clear();
                            decoder.QueueInputBuffer(inputBufIndex, 0, sampleSize, extractor.SampleTime, 0);
                            extractor.Advance();

                            inputChunk++;
                        }
                    }
                    else {
                        if (VERBOSE) Log.Debug(TAG, "input buffer not available");
                    }
                }
                // Assume output is available.  Loop until both assumptions are false.
                bool decoderOutputAvailable = !decoderDone;
                bool encoderOutputAvailable = true;
                while (decoderOutputAvailable || encoderOutputAvailable)
                {
                    // Start by draining any pending output from the encoder.  It's important to
                    // do this before we try to stuff any more data in.
                    int encoderStatus = encoder.DequeueOutputBuffer(info, TIMEOUT_USEC);
                    if (encoderStatus == (int)MediaCodecInfoState.TryAgainLater)
                    {
                        // no output available yet
                        if (VERBOSE) Log.Debug(TAG, "no output from encoder available");
                        encoderOutputAvailable = false;
                    }
                    else if (encoderStatus == (int)MediaCodecInfoState.OutputBuffersChanged)
                    {
                        encoderOutputBuffers = encoder.GetOutputBuffers();
                        if (VERBOSE) Log.Debug(TAG, "encoder output buffers changed");
                    }
                    else if (encoderStatus == (int)MediaCodecInfoState.OutputFormatChanged)
                    {
                        // NOTE(review): newFormat carries the encoder's real output format,
                        // including the csd-0/csd-1 buffers (SPS/PPS) a muxer needs. It is
                        // only logged here and then discarded — it should be handed to a
                        // MediaMuxer.AddTrack call for the output to be playable.
                        MediaFormat newFormat = encoder.OutputFormat;
                        if (VERBOSE) Log.Debug(TAG, "encoder output format changed: " + newFormat);
                    }
                    else if (encoderStatus < 0)
                    {
                        Log.Error(TAG, "unexpected result from encoder.dequeueOutputBuffer: " + encoderStatus);
                    }
                    else { // encoderStatus >= 0
                        ByteBuffer encodedData = encoderOutputBuffers[encoderStatus];
                        // NOTE(review): this null check only logs and then falls through to
                        // dereference encodedData below — it should throw or skip the buffer.
                        if (encodedData == null)
                        {
                            Log.Error(TAG,"encoderOutputBuffer " + encoderStatus + " was null");
                        }
                        // Write the data to the output "file".
                        if (info.Size != 0)
                        {
                            encodedData.Position(info.Offset);
                            encodedData.Limit(info.Offset + info.Size);
                            byte[] data = new byte[encodedData.Remaining()];
                            encodedData.Get(data);
                            // NOTE(review): raw write, no container; see loop header note.
                            // Also: codec-config buffers (BufferFlagCodecConfig) are counted
                            // in outputCount here, which can trip the frame-count check below.
                            fStream.Write(data, 0, data.Length);
                           // outputData.addChunk(encodedData, (int)info.Flags, info.PresentationTimeUs);
                            outputCount++;
                            if (VERBOSE) Log.Debug(TAG, "encoder output " + info.Size + " bytes");
                        }
                        outputDone = (info.Flags & MediaCodecBufferFlags.EndOfStream) != 0;
                        encoder.ReleaseOutputBuffer(encoderStatus, false);
                    }
                    if (encoderStatus != (int)MediaCodecInfoState.TryAgainLater)
                    {
                        // Continue attempts to drain output.
                        continue;
                    }
                    // Encoder is drained, check to see if we've got a new frame of output from
                    // the decoder.  (The output is going to a Surface, rather than a ByteBuffer,
                    // but we still get information through BufferInfo.)
                    if (!decoderDone)
                    {
                        int decoderStatus = decoder.DequeueOutputBuffer(info, TIMEOUT_USEC);
                        if (decoderStatus == (int)MediaCodecInfoState.TryAgainLater)
                        {
                            // no output available yet
                            if (VERBOSE) Log.Debug(TAG, "no output from decoder available");
                            decoderOutputAvailable = false;
                        }
                        else if (decoderStatus == (int)MediaCodecInfoState.OutputBuffersChanged)
                        {
                            //decoderOutputBuffers = decoder.GetOutputBuffers();
                            if (VERBOSE) Log.Debug(TAG, "decoder output buffers changed (we don't care)");
                        }
                        else if (decoderStatus == (int)MediaCodecInfoState.OutputFormatChanged)
                        {
                            // expected before first buffer of data
                            MediaFormat newFormat = decoder.OutputFormat;
                            if (VERBOSE) Log.Debug(TAG, "decoder output format changed: " + newFormat);
                        }
                        else if (decoderStatus < 0)
                        {
                            Log.Error(TAG,"unexpected result from decoder.dequeueOutputBuffer: " + decoderStatus);
                        }
                        else { // decoderStatus >= 0
                            if (VERBOSE) Log.Debug(TAG, "surface decoder given buffer "
                                    + decoderStatus + " (size=" + info.Size + ")");
                            // The ByteBuffers are null references, but we still get a nonzero
                            // size for the decoded data.
                            bool doRender = (info.Size != 0);
                            // As soon as we call releaseOutputBuffer, the buffer will be forwarded
                            // to SurfaceTexture to convert to a texture.  The API doesn't
                            // guarantee that the texture will be available before the call
                            // returns, so we need to wait for the onFrameAvailable callback to
                            // fire.  If we don't wait, we risk rendering from the previous frame.
                            decoder.ReleaseOutputBuffer(decoderStatus, doRender);
                            if (doRender)
                            {
                                // This waits for the image and renders it after it arrives.
                                if (VERBOSE) Log.Debug(TAG, "awaiting frame");
                                outputSurface.awaitNewImage();
                                outputSurface.drawImage();
                                // NOTE(review): saving a JPEG of EVERY frame to external
                                // storage is debug-only; it badly slows the pipeline and
                                // overwrites the same file each time. Remove for production.
                                outputSurface.saveFrame(Android.OS.Environment.ExternalStorageDirectory + "/test.jpg", mWidth, mHeight);
                                // Send it to the encoder.
                                // Encoder timestamps are in nanoseconds; BufferInfo is in
                                // microseconds, hence the * 1000.
                                inputSurface.setPresentationTime(info.PresentationTimeUs * 1000);
                                if (VERBOSE) Log.Debug(TAG, "swapBuffers");
                                inputSurface.swapBuffers();
                            }
                            if ((info.Flags & MediaCodecBufferFlags.EndOfStream) != 0)
                            {
                                // forward decoder EOS to encoder
                                if (VERBOSE) Log.Debug(TAG, "signaling input EOS");
                                if (WORK_AROUND_BUGS)
                                {
                                    // Bail early, possibly dropping a frame.
                                    return;
                                }
                                else {
                                    encoder.SignalEndOfInputStream();
                                }
                            }
                        }
                    }
                }
            }
            // Sanity check: every input sample should have produced one encoded frame.
            if (inputChunk != outputCount)
            {
                throw new RuntimeException("frame lost: " + inputChunk + " in, " +
                        outputCount + " out");
            }
            // NOTE(review): not in a finally block — if the check above throws, the stream
            // is never closed. Prefer a using/finally around the whole loop.
            fStream.Close();

Como puedo guardar el fotograma en una imagen y ver que está bien, supongo que el fotograma llega correctamente a OutputSurface. Y no veo nada extraño en la configuración del codificador. ¿Podrías ayudarme, al menos diciéndome qué crees que debería comprobar? Gracias.

Respuestas a la pregunta(1)

Su respuesta a la pregunta