diff --git a/app/src/main/java/com/facebook/encapp/AsyncBufferEncoder.java b/app/src/main/java/com/facebook/encapp/AsyncBufferEncoder.java index 98707e0c..5dea6888 100644 --- a/app/src/main/java/com/facebook/encapp/AsyncBufferEncoder.java +++ b/app/src/main/java/com/facebook/encapp/AsyncBufferEncoder.java @@ -87,7 +87,7 @@ public String start() { // Initialize input reader String filepath = mTest.getInput().getFilepath(); - if (filepath.equals("fake_input")) { + if (filepath.startsWith("fake_input")) { mFakeInputReader = new FakeInputReader(); if (!mFakeInputReader.openFile(filepath, inputFmt, width, height)) { return "Could not initialize fake input"; diff --git a/app/src/main/java/com/facebook/encapp/BufferEncoder.java b/app/src/main/java/com/facebook/encapp/BufferEncoder.java index 8805cac0..af3d2ce5 100644 --- a/app/src/main/java/com/facebook/encapp/BufferEncoder.java +++ b/app/src/main/java/com/facebook/encapp/BufferEncoder.java @@ -62,7 +62,7 @@ public String start() { PixFmt inputFmt = mTest.getInput().getPixFmt(); mRefFramesizeInBytes = MediaCodecInfoHelper.frameSizeInBytes(inputFmt, sourceResolution.getWidth(), sourceResolution.getHeight()); - if (mTest.getInput().getFilepath().equals("fake_input")) { + if (mTest.getInput().getFilepath().startsWith("fake_input")) { mIsFakeInput = true; Log.d(TAG, "Using fake input for performance testing"); mFakeInputReader = new FakeInputReader(); diff --git a/app/src/main/java/com/facebook/encapp/CustomEncoder.java b/app/src/main/java/com/facebook/encapp/CustomEncoder.java index 3cba4344..484bcd36 100644 --- a/app/src/main/java/com/facebook/encapp/CustomEncoder.java +++ b/app/src/main/java/com/facebook/encapp/CustomEncoder.java @@ -47,6 +47,10 @@ class CustomEncoder extends Encoder { public static native byte[] getHeader(); // TODO: can the size, color and bitdepth change runtime? public static native int encode(byte[] input, byte[] output, FrameInfo info); + // Flush buffered frames. Call until returns 0. 
+ public static native int flushEncoder(byte[] output, FrameInfo info); + // Get number of frames currently buffered in encoder + public static native int getDelayedFrames(); public static native StringParameter[] getAllEncoderSettings(); @@ -90,7 +94,7 @@ public CustomEncoder(Test test, String filesDir) { Log.e(TAG, "Failed to load library, " + name + ", " + targetPath + ": " + e.getMessage()); } } - + public static byte[] readYUVFromFile(String filePath, int size, int framePosition) throws IOException { byte[] inputBuffer = new byte[size]; @@ -172,7 +176,7 @@ public void setRuntimeParameters(int frame) { } for (Long sync : mRuntimeParams.getRequestSyncList()) { - if (sync == frame) { + if (sync == frame) { addEncoderParameters(params, DataValueType.longType.name(), "request-sync", ""); break; } @@ -318,11 +322,13 @@ public String start() { outputBufferSize = encode(yuvData, outputBuffer, info); // Look at nal type as well, not just key frame? // To ms? - mStats.stopEncodingFrame(info.getPts() , info.getSize(), info.isIFrame()); - if (outputBufferSize == 0) { - return "Failed to encode frame"; - } else if (outputBufferSize == -1) { - return "Encoder not started"; + if (outputBufferSize < 0) { + return "Encoder not started or error occurred"; + } + // outputBufferSize == 0 means frame was buffered (B-frame reordering) + // This is normal when B-frames are enabled, we'll get output later + if (outputBufferSize > 0) { + mStats.stopEncodingFrame(info.getPts() , info.getSize(), info.isIFrame()); } currentFramePosition += frameSize; mFramesAdded++; @@ -359,16 +365,17 @@ public String start() { muxerStarted = true; } - ByteBuffer buffer = ByteBuffer.wrap(outputBuffer); - bufferInfo.offset = 0; - bufferInfo.size = outputBufferSize; - bufferInfo.presentationTimeUs = info.getPts(); + // Only write to muxer if we have actual output + if (outputBufferSize > 0 && mMuxerWrapper != null) { + ByteBuffer buffer = ByteBuffer.wrap(outputBuffer); + bufferInfo.offset = 0; + 
bufferInfo.size = outputBufferSize; + bufferInfo.presentationTimeUs = info.getPts(); - //TODO: we get this from FrameInfo instead - boolean isKeyFrame = checkIfKeyFrame(outputBuffer); - if (isKeyFrame) bufferInfo.flags = MediaCodec.BUFFER_FLAG_KEY_FRAME; + boolean isKeyFrame = checkIfKeyFrame(outputBuffer); + if (isKeyFrame) bufferInfo.flags = MediaCodec.BUFFER_FLAG_KEY_FRAME; + else bufferInfo.flags = 0; - if(mMuxerWrapper != null) { buffer.position(bufferInfo.offset); buffer.limit(bufferInfo.offset + bufferInfo.size); @@ -382,6 +389,35 @@ public String start() { } mStats.stop(); + // Flush any remaining buffered frames (important for B-frames/multi-threading) + int delayedFrames = getDelayedFrames(); + Log.d(TAG, "Flushing " + delayedFrames + " delayed frames from encoder"); + while (delayedFrames > 0) { + info = new FrameInfo(0); + outputBufferSize = flushEncoder(outputBuffer, info); + if (outputBufferSize <= 0) { + break; // No more frames or error + } + Log.d(TAG, "Flushed frame: pts=" + info.getPts() + ", size=" + outputBufferSize); + + // Write flushed frame to muxer + if (mMuxerWrapper != null && muxerStarted) { + ByteBuffer buffer = ByteBuffer.wrap(outputBuffer); + bufferInfo.offset = 0; + bufferInfo.size = outputBufferSize; + bufferInfo.presentationTimeUs = info.getPts(); + + boolean isKeyFrame = checkIfKeyFrame(outputBuffer); + bufferInfo.flags = isKeyFrame ? 
MediaCodec.BUFFER_FLAG_KEY_FRAME : 0; + + buffer.position(bufferInfo.offset); + buffer.limit(bufferInfo.offset + bufferInfo.size); + mMuxerWrapper.writeSampleData(videoTrackIndex, buffer, bufferInfo); + } + delayedFrames = getDelayedFrames(); + } + Log.d(TAG, "Encoder flush complete"); + Log.d(TAG, "Close encoder and streams"); close(); diff --git a/app/src/main/java/com/facebook/encapp/SurfaceEncoder.java b/app/src/main/java/com/facebook/encapp/SurfaceEncoder.java index 3cc872ec..766233fc 100644 --- a/app/src/main/java/com/facebook/encapp/SurfaceEncoder.java +++ b/app/src/main/java/com/facebook/encapp/SurfaceEncoder.java @@ -57,6 +57,7 @@ class SurfaceEncoder extends Encoder implements VsyncListener { boolean mIsCameraSource = false; boolean mIsFakeInput = false; FakeGLRenderer mFakeGLRenderer; // GL-based fake input (fast!) + FakeGLRenderer.PatternType mFakeInputPatternType = FakeGLRenderer.PatternType.TEXTURE; // Default pattern boolean mUseCameraTimestamp = true; OutputMultiplier mOutputMult; Bundle mKeyFrameBundle; @@ -115,10 +116,12 @@ public String encode( if (mTest.getInput().getPixFmt().getNumber() == PixFmt.rgba_VALUE) { mIsRgbaSource = true; mRefFramesizeInBytes = width * height * 4; - } else if (mTest.getInput().getFilepath().equals("fake_input")) { + } else if (mTest.getInput().getFilepath().startsWith("fake_input")) { mIsFakeInput = true; + // Parse pattern type from "fake_input.type" notation + mFakeInputPatternType = parseFakeInputPatternType(mTest.getInput().getFilepath()); // Use GL rendering for fake input - ZERO CPU overhead! - Log.d(TAG, "Using fake input with GL rendering for performance testing"); + Log.d(TAG, "Using fake input with GL rendering, pattern: " + mFakeInputPatternType); } else if (mTest.getInput().getFilepath().equals("camera")) { mIsCameraSource = true; //TODO: handle other fps (i.e. 
try to set lower or higher fps) @@ -164,8 +167,9 @@ public String encode( if (mIsFakeInput) { // Initialize FakeGLRenderer (will be set up later on GL thread) mFakeGLRenderer = new FakeGLRenderer(); - mFakeGLRenderer.setPatternType(FakeGLRenderer.PatternType.TEXTURE); - Log.d(TAG, "Created FakeGLRenderer for GL-based fake input"); + mFakeGLRenderer.setPatternType(mFakeInputPatternType); + mFakeGLRenderer.setDimensions(width, height); + Log.d(TAG, "Created FakeGLRenderer for GL-based fake input with pattern: " + mFakeInputPatternType); // Initialize on GL thread after OutputMultiplier is ready } else { mYuvReader = new FileReader(); @@ -714,4 +718,43 @@ public void vsync(long frameTimeNs) { mSyncLock.notifyAll(); } } + + /** + * Parse pattern type from fake_input filepath notation. + * Supports: "fake_input", "fake_input.clock", "fake_input.texture", "fake_input.gradient", "fake_input.solid" + * + * @param filepath The input filepath (e.g., "fake_input.clock") + * @return The parsed PatternType, defaults to TEXTURE if not specified or unknown + */ + private FakeGLRenderer.PatternType parseFakeInputPatternType(String filepath) { + if (filepath == null || !filepath.startsWith("fake_input")) { + return FakeGLRenderer.PatternType.TEXTURE; + } + + // Check for ".type" suffix + if (filepath.contains(".")) { + String suffix = filepath.substring(filepath.lastIndexOf('.') + 1).toLowerCase(); + switch (suffix) { + case "clock": + Log.d(TAG, "Parsed fake_input pattern type: CLOCK"); + return FakeGLRenderer.PatternType.CLOCK; + case "texture": + Log.d(TAG, "Parsed fake_input pattern type: TEXTURE"); + return FakeGLRenderer.PatternType.TEXTURE; + case "gradient": + Log.d(TAG, "Parsed fake_input pattern type: GRADIENT"); + return FakeGLRenderer.PatternType.GRADIENT; + case "solid": + Log.d(TAG, "Parsed fake_input pattern type: SOLID"); + return FakeGLRenderer.PatternType.SOLID; + default: + Log.w(TAG, "Unknown fake_input pattern type: " + suffix + ", using TEXTURE"); + return 
FakeGLRenderer.PatternType.TEXTURE; + } + } + + // No suffix, use default + Log.d(TAG, "No pattern type specified, using default: TEXTURE"); + return FakeGLRenderer.PatternType.TEXTURE; + } } diff --git a/app/src/main/java/com/facebook/encapp/utils/FakeGLRenderer.java b/app/src/main/java/com/facebook/encapp/utils/FakeGLRenderer.java index 9b116a38..8f0c8f01 100644 --- a/app/src/main/java/com/facebook/encapp/utils/FakeGLRenderer.java +++ b/app/src/main/java/com/facebook/encapp/utils/FakeGLRenderer.java @@ -46,10 +46,11 @@ public class FakeGLRenderer { // Fragment shader - solid gray (for testing - most compressible) private static final String FRAGMENT_SHADER_SOLID = "precision mediump float;\n" + + "varying vec2 vTexCoord;\n" + "uniform float uTime;\n" + "void main() {\n" + - " // Pulsating gray\n" + - " float gray = 0.5 + 0.3 * sin(uTime * 2.0);\n" + + " // Pulsating gray with imperceptible vTexCoord contribution to prevent optimization\n" + + " float gray = 0.5 + 0.3 * sin(uTime * 2.0) + vTexCoord.x * 0.001;\n" + " gl_FragColor = vec4(gray, gray, gray, 1.0);\n" + "}\n"; @@ -77,6 +78,196 @@ public class FakeGLRenderer { " gl_FragColor = vec4(baseR, baseG, baseB, 1.0);\n" + "}\n"; + // Fragment shader - analog clock with hour, minute, second hands and frame counter + private static final String FRAGMENT_SHADER_CLOCK = + "precision highp float;\n" + + "varying vec2 vTexCoord;\n" + + "uniform float uTime;\n" + + "uniform float uFrameCount;\n" + + "uniform float uAspectRatio;\n" + + "\n" + + "#define PI 3.14159265359\n" + + "#define TWO_PI 6.28318530718\n" + + "\n" + + "// Draw a clock hand as a line segment from center\n" + + "float drawHand(vec2 uv, float angle, float handLen, float width) {\n" + + " // Rotate by -90 degrees so 0 angle points up (12 o'clock)\n" + + " float a = -angle + PI * 0.5;\n" + + " vec2 dir = vec2(cos(a), sin(a));\n" + + " \n" + + " // Project point onto the hand direction\n" + + " float proj = dot(uv, dir);\n" + + " \n" + + " // Only draw from 
center outward to handLen\n" + + " if (proj < 0.0 || proj > handLen) return 0.0;\n" + + " \n" + + " // Distance from the line\n" + + " vec2 closestPoint = dir * proj;\n" + + " float dist = length(uv - closestPoint);\n" + + " \n" + + " // Smooth anti-aliased line\n" + + " return 1.0 - smoothstep(width * 0.5 - 0.005, width * 0.5 + 0.005, dist);\n" + + "}\n" + + "\n" + + "// Draw tick marks around the clock face\n" + + "float drawTicks(vec2 uv, float radius) {\n" + + " float result = 0.0;\n" + + " float angle = atan(uv.y, uv.x);\n" + + " float dist = length(uv);\n" + + " \n" + + " // Hour ticks (12)\n" + + " for (int i = 0; i < 12; i++) {\n" + + " float tickAngle = float(i) * TWO_PI / 12.0;\n" + + " vec2 tickDir = vec2(cos(tickAngle), sin(tickAngle));\n" + + " \n" + + " // Draw thick tick from radius-0.08 to radius\n" + + " float proj = dot(uv, tickDir);\n" + + " if (proj > radius - 0.1 && proj < radius) {\n" + + " vec2 closestPoint = tickDir * proj;\n" + + " float d = length(uv - closestPoint);\n" + + " float tickWidth = (mod(float(i), 3.0) == 0.0) ? 
0.025 : 0.015;\n" + + " result = max(result, 1.0 - smoothstep(tickWidth * 0.5, tickWidth * 0.5 + 0.005, d));\n" + + " }\n" + + " }\n" + + " return result;\n" + + "}\n" + + "\n" + + "// Draw a single digit at position\n" + + "float drawDigit(vec2 uv, int digit, vec2 pos, float size) {\n" + + " vec2 p = (uv - pos) / size;\n" + + " if (p.x < 0.0 || p.x > 0.6 || p.y < 0.0 || p.y > 1.0) return 0.0;\n" + + " \n" + + " // 7-segment display encoding\n" + + " // Segments: top, top-right, bottom-right, bottom, bottom-left, top-left, middle\n" + + " float w = 0.15;\n" + + " float result = 0.0;\n" + + " \n" + + " // Segment patterns for digits 0-9\n" + + " // Each digit encoded as which segments are on\n" + + " bool seg[7];\n" + + " \n" + + " if (digit == 0) { seg[0]=true; seg[1]=true; seg[2]=true; seg[3]=true; seg[4]=true; seg[5]=true; seg[6]=false; }\n" + + " else if (digit == 1) { seg[0]=false; seg[1]=true; seg[2]=true; seg[3]=false; seg[4]=false; seg[5]=false; seg[6]=false; }\n" + + " else if (digit == 2) { seg[0]=true; seg[1]=true; seg[2]=false; seg[3]=true; seg[4]=true; seg[5]=false; seg[6]=true; }\n" + + " else if (digit == 3) { seg[0]=true; seg[1]=true; seg[2]=true; seg[3]=true; seg[4]=false; seg[5]=false; seg[6]=true; }\n" + + " else if (digit == 4) { seg[0]=false; seg[1]=true; seg[2]=true; seg[3]=false; seg[4]=false; seg[5]=true; seg[6]=true; }\n" + + " else if (digit == 5) { seg[0]=true; seg[1]=false; seg[2]=true; seg[3]=true; seg[4]=false; seg[5]=true; seg[6]=true; }\n" + + " else if (digit == 6) { seg[0]=true; seg[1]=false; seg[2]=true; seg[3]=true; seg[4]=true; seg[5]=true; seg[6]=true; }\n" + + " else if (digit == 7) { seg[0]=true; seg[1]=true; seg[2]=true; seg[3]=false; seg[4]=false; seg[5]=false; seg[6]=false; }\n" + + " else if (digit == 8) { seg[0]=true; seg[1]=true; seg[2]=true; seg[3]=true; seg[4]=true; seg[5]=true; seg[6]=true; }\n" + + " else if (digit == 9) { seg[0]=true; seg[1]=true; seg[2]=true; seg[3]=true; seg[4]=false; seg[5]=true; seg[6]=true; 
}\n" + + " else { seg[0]=false; seg[1]=false; seg[2]=false; seg[3]=false; seg[4]=false; seg[5]=false; seg[6]=false; }\n" + + " \n" + + " // Draw each segment as a rectangle\n" + + " // Top horizontal\n" + + " if (seg[0] && p.y > 0.85 && p.x > 0.1 && p.x < 0.5) result = 1.0;\n" + + " // Top-right vertical\n" + + " if (seg[1] && p.x > 0.45 && p.y > 0.5 && p.y < 0.9) result = 1.0;\n" + + " // Bottom-right vertical\n" + + " if (seg[2] && p.x > 0.45 && p.y > 0.1 && p.y < 0.5) result = 1.0;\n" + + " // Bottom horizontal\n" + + " if (seg[3] && p.y < 0.15 && p.x > 0.1 && p.x < 0.5) result = 1.0;\n" + + " // Bottom-left vertical\n" + + " if (seg[4] && p.x < 0.15 && p.y > 0.1 && p.y < 0.5) result = 1.0;\n" + + " // Top-left vertical\n" + + " if (seg[5] && p.x < 0.15 && p.y > 0.5 && p.y < 0.9) result = 1.0;\n" + + " // Middle horizontal\n" + + " if (seg[6] && p.y > 0.45 && p.y < 0.55 && p.x > 0.1 && p.x < 0.5) result = 1.0;\n" + + " \n" + + " return result;\n" + + "}\n" + + "\n" + + "void main() {\n" + + " // Center UV coordinates\n" + + " vec2 uv = vTexCoord - 0.5;\n" + + " \n" + + " // Correct aspect ratio to make clock round\n" + + " // If aspect > 1 (wide), stretch x to compensate\n" + + " // If aspect < 1 (tall), stretch y to compensate\n" + + " if (uAspectRatio > 1.0) {\n" + + " uv.x *= uAspectRatio;\n" + + " } else {\n" + + " uv.y /= uAspectRatio;\n" + + " }\n" + + " \n" + + " // Background gradient\n" + + " vec3 bgColor = mix(vec3(0.15, 0.15, 0.2), vec3(0.25, 0.25, 0.35), vTexCoord.y);\n" + + " \n" + + " // Clock face\n" + + " float clockRadius = 0.35;\n" + + " float dist = length(uv);\n" + + " \n" + + " // Clock face circle (white with gray border)\n" + + " vec3 faceColor = vec3(0.95, 0.95, 0.92);\n" + + " float faceMask = 1.0 - smoothstep(clockRadius - 0.01, clockRadius, dist);\n" + + " float borderMask = smoothstep(clockRadius - 0.025, clockRadius - 0.015, dist) * \n" + + " (1.0 - smoothstep(clockRadius - 0.01, clockRadius, dist));\n" + + " \n" + + " // Calculate 
time components\n" + + " float totalSeconds = uTime;\n" + + " float hours = mod(totalSeconds / 3600.0, 12.0);\n" + + " float minutes = mod(totalSeconds / 60.0, 60.0);\n" + + " float seconds = mod(totalSeconds, 60.0);\n" + + " int frameNum = int(mod(uTime * 30.0, 10000.0)); // Assuming 30fps\n" + + " \n" + + " // Calculate hand angles (in radians, 0 = 12 o'clock, clockwise)\n" + + " float hourAngle = hours * TWO_PI / 12.0;\n" + + " float minuteAngle = minutes * TWO_PI / 60.0;\n" + + " float secondAngle = seconds * TWO_PI / 60.0;\n" + + " \n" + + " // Draw clock hands\n" + + " float hourHand = drawHand(uv, hourAngle, 0.15, 0.025);\n" + + " float minuteHand = drawHand(uv, minuteAngle, 0.25, 0.018);\n" + + " float secondHand = drawHand(uv, secondAngle, 0.28, 0.008);\n" + + " \n" + + " // Draw tick marks\n" + + " float ticks = drawTicks(uv, clockRadius - 0.02);\n" + + " \n" + + " // Center dot\n" + + " float centerDot = 1.0 - smoothstep(0.015, 0.02, dist);\n" + + " \n" + + " // Compose clock face\n" + + " vec3 clockColor = faceColor;\n" + + " clockColor = mix(clockColor, vec3(0.3, 0.3, 0.3), ticks); // Tick marks\n" + + " clockColor = mix(clockColor, vec3(0.2, 0.2, 0.25), hourHand); // Hour hand (dark)\n" + + " clockColor = mix(clockColor, vec3(0.15, 0.15, 0.2), minuteHand); // Minute hand (darker)\n" + + " clockColor = mix(clockColor, vec3(0.8, 0.1, 0.1), secondHand); // Second hand (red)\n" + + " clockColor = mix(clockColor, vec3(0.8, 0.1, 0.1), centerDot); // Center dot (red)\n" + + " clockColor = mix(clockColor, vec3(0.4, 0.35, 0.3), borderMask); // Border\n" + + " \n" + + " // Mix clock face with background\n" + + " vec3 color = mix(bgColor, clockColor, faceMask);\n" + + " \n" + + " // Draw frame counter at bottom\n" + + " float digitSize = 0.06;\n" + + " float digitSpacing = digitSize * 0.7;\n" + + " vec2 counterPos = vec2(0.35, 0.08);\n" + + " \n" + + " // Extract digits from frame number (show last 4 digits)\n" + + " int d0 = int(mod(float(frameNum), 10.0));\n" 
+ + " int d1 = int(mod(float(frameNum / 10), 10.0));\n" + + " int d2 = int(mod(float(frameNum / 100), 10.0));\n" + + " int d3 = int(mod(float(frameNum / 1000), 10.0));\n" + + " \n" + + " // Draw digits\n" + + " float digitMask = 0.0;\n" + + " digitMask += drawDigit(vTexCoord, d3, counterPos, digitSize);\n" + + " digitMask += drawDigit(vTexCoord, d2, counterPos + vec2(digitSpacing, 0.0), digitSize);\n" + + " digitMask += drawDigit(vTexCoord, d1, counterPos + vec2(digitSpacing * 2.0, 0.0), digitSize);\n" + + " digitMask += drawDigit(vTexCoord, d0, counterPos + vec2(digitSpacing * 3.0, 0.0), digitSize);\n" + + " \n" + + " // Frame counter color (green digital display look)\n" + + " color = mix(color, vec3(0.2, 0.9, 0.3), min(digitMask, 1.0));\n" + + " \n" + + " // Add \"FRAME\" label using simple rectangles\n" + + " // (Simplified - just draw a small indicator)\n" + + " if (vTexCoord.x > 0.35 && vTexCoord.x < 0.55 && vTexCoord.y > 0.05 && vTexCoord.y < 0.065) {\n" + + " color = mix(color, vec3(0.1, 0.5, 0.2), 0.8);\n" + + " }\n" + + " \n" + + " gl_FragColor = vec4(color, 1.0);\n" + + "}\n"; + // Full-screen quad vertices private static final float[] VERTICES = { // Position (x, y) TexCoord (s, t) @@ -95,6 +286,9 @@ public class FakeGLRenderer { private int mPositionHandle; private int mTexCoordHandle; private int mTimeHandle; + private int mAspectRatioHandle; + private int mWidth = 1; + private int mHeight = 1; private long mFrameCount = 0; private boolean mInitialized = false; @@ -102,7 +296,8 @@ public class FakeGLRenderer { public enum PatternType { SOLID, // Solid gray - most compressible GRADIENT, // Simple gradient - TEXTURE // Textured pattern - similar to real video + TEXTURE, // Textured pattern - similar to real video + CLOCK // Analog clock with hour, minute, second hands and frame counter } private PatternType mPatternType = PatternType.TEXTURE; @@ -126,25 +321,49 @@ public void init() { mVertexBuffer.put(VERTICES).position(0); // Create shader program - 
mProgram = createProgram(VERTEX_SHADER, getFragmentShaderForPattern(mPatternType)); + String fragmentShader = getFragmentShaderForPattern(mPatternType); + Log.d(TAG, "Creating shader program for pattern: " + mPatternType); + mProgram = createProgram(VERTEX_SHADER, fragmentShader); if (mProgram == 0) { + Log.e(TAG, "Failed to create GL program for pattern: " + mPatternType); throw new RuntimeException("Failed to create GL program"); } + Log.d(TAG, "Created GL program: " + mProgram); // Get attribute/uniform locations mPositionHandle = GLES20.glGetAttribLocation(mProgram, "aPosition"); - checkGlError("glGetAttribLocation aPosition"); + if (mPositionHandle < 0) { + Log.e(TAG, "Failed to get aPosition attribute location (got " + mPositionHandle + ")"); + throw new RuntimeException("Failed to get aPosition attribute location"); + } + Log.d(TAG, "aPosition handle: " + mPositionHandle); mTexCoordHandle = GLES20.glGetAttribLocation(mProgram, "aTexCoord"); - checkGlError("glGetAttribLocation aTexCoord"); + if (mTexCoordHandle < 0) { + Log.e(TAG, "Failed to get aTexCoord attribute location (got " + mTexCoordHandle + ")"); + throw new RuntimeException("Failed to get aTexCoord attribute location"); + } + Log.d(TAG, "aTexCoord handle: " + mTexCoordHandle); mTimeHandle = GLES20.glGetUniformLocation(mProgram, "uTime"); - checkGlError("glGetUniformLocation uTime"); + Log.d(TAG, "uTime handle: " + mTimeHandle); + + // Get aspect ratio uniform location (only used by clock shader, but safe to query) + mAspectRatioHandle = GLES20.glGetUniformLocation(mProgram, "uAspectRatio"); + Log.d(TAG, "uAspectRatio handle: " + mAspectRatioHandle); mInitialized = true; Log.d(TAG, "FakeGLRenderer initialized with pattern: " + mPatternType); } + /** + * Set the video dimensions. Used to calculate aspect ratio for round clock. + */ + public void setDimensions(int width, int height) { + mWidth = width > 0 ? width : 1; + mHeight = height > 0 ? height : 1; + } + /** * Set the pattern type. 
Must call before init() or call release() + init() to switch. */ @@ -166,29 +385,61 @@ public void renderFrame(long timestampUs) { init(); } + // Double-check handles are valid (defensive) + if (mProgram == 0 || mPositionHandle < 0 || mTexCoordHandle < 0) { + Log.e(TAG, "Invalid GL state: program=" + mProgram + + ", positionHandle=" + mPositionHandle + + ", texCoordHandle=" + mTexCoordHandle); + // Try to re-initialize + mInitialized = false; + init(); + } + // Use shader program GLES20.glUseProgram(mProgram); - checkGlError("glUseProgram"); + int error = GLES20.glGetError(); + if (error != GLES20.GL_NO_ERROR) { + Log.e(TAG, "glUseProgram failed with error 0x" + Integer.toHexString(error) + ", program=" + mProgram); + throw new RuntimeException("glUseProgram failed"); + } // Set time uniform (for animation) float timeValue = (float) timestampUs / 1000000.0f; // Convert to seconds - GLES20.glUniform1f(mTimeHandle, timeValue); - checkGlError("glUniform1f uTime"); + if (mTimeHandle >= 0) { + GLES20.glUniform1f(mTimeHandle, timeValue); + } + + // Set aspect ratio uniform (for round clock) + if (mAspectRatioHandle >= 0) { + float aspectRatio = (float) mWidth / (float) mHeight; + GLES20.glUniform1f(mAspectRatioHandle, aspectRatio); + } // Enable vertex arrays GLES20.glEnableVertexAttribArray(mPositionHandle); GLES20.glEnableVertexAttribArray(mTexCoordHandle); - // Set vertex data + // Set vertex data - position mVertexBuffer.position(0); GLES20.glVertexAttribPointer(mPositionHandle, COORDS_PER_VERTEX, GLES20.GL_FLOAT, false, VERTEX_STRIDE, mVertexBuffer); - checkGlError("glVertexAttribPointer aPosition"); + error = GLES20.glGetError(); + if (error != GLES20.GL_NO_ERROR) { + Log.e(TAG, "glVertexAttribPointer aPosition failed: error=0x" + Integer.toHexString(error) + + ", handle=" + mPositionHandle + ", buffer=" + mVertexBuffer); + throw new RuntimeException("glVertexAttribPointer aPosition: glError 0x" + Integer.toHexString(error)); + } + // Set vertex data - texcoord 
mVertexBuffer.position(COORDS_PER_VERTEX); GLES20.glVertexAttribPointer(mTexCoordHandle, TEXCOORDS_PER_VERTEX, GLES20.GL_FLOAT, false, VERTEX_STRIDE, mVertexBuffer); - checkGlError("glVertexAttribPointer aTexCoord"); + error = GLES20.glGetError(); + if (error != GLES20.GL_NO_ERROR) { + Log.e(TAG, "glVertexAttribPointer aTexCoord failed: error=0x" + Integer.toHexString(error) + + ", handle=" + mTexCoordHandle); + throw new RuntimeException("glVertexAttribPointer aTexCoord: glError 0x" + Integer.toHexString(error)); + } // Draw full-screen quad GLES20.glDrawArrays(GLES20.GL_TRIANGLE_STRIP, 0, 4); @@ -221,6 +472,8 @@ private String getFragmentShaderForPattern(PatternType type) { return FRAGMENT_SHADER_GRADIENT; case TEXTURE: return FRAGMENT_SHADER_TEXTURE; + case CLOCK: + return FRAGMENT_SHADER_CLOCK; default: return FRAGMENT_SHADER_TEXTURE; } diff --git a/app/src/main/java/com/facebook/encapp/utils/OutputMultiplier.java b/app/src/main/java/com/facebook/encapp/utils/OutputMultiplier.java index 9d040bda..2a08c0f5 100644 --- a/app/src/main/java/com/facebook/encapp/utils/OutputMultiplier.java +++ b/app/src/main/java/com/facebook/encapp/utils/OutputMultiplier.java @@ -24,21 +24,21 @@ abstract class RenderBufferObject { long mTimestampUs; int mFrameCount; Statistics mStats; // For encoding measurement - + RenderBufferObject(long timestampUs, int frameCount, Statistics stats) { mTimestampUs = timestampUs; mFrameCount = frameCount; mStats = stats; } - + long getTimestampUs() { return mTimestampUs; } - + int getFrameCount() { return mFrameCount; } - + Statistics getStats() { return mStats; } @@ -48,7 +48,7 @@ class RenderFrameBuffer extends RenderBufferObject { MediaCodec mCodec; int mBufferId; MediaCodec.BufferInfo mInfo; - + RenderFrameBuffer(MediaCodec codec, int id, MediaCodec.BufferInfo info, int frameCount, Statistics stats) { super(info.presentationTimeUs, frameCount, stats); mCodec = codec; @@ -59,7 +59,7 @@ class RenderFrameBuffer extends RenderBufferObject { class 
RenderBitmapBuffer extends RenderBufferObject { Bitmap mBitmap; - + RenderBitmapBuffer(Bitmap bitmap, long timestampUs, int frameCount, Statistics stats) { super(timestampUs, frameCount, stats); mBitmap = bitmap; @@ -68,7 +68,7 @@ class RenderBitmapBuffer extends RenderBufferObject { class RenderGLPatternBuffer extends RenderBufferObject { FakeGLRenderer mGLRenderer; - + RenderGLPatternBuffer(FakeGLRenderer glRenderer, long timestampUs, int frameCount, Statistics stats) { super(timestampUs, frameCount, stats); mGLRenderer = glRenderer; @@ -181,7 +181,7 @@ public long awaitNewImage() { public void newBitmapAvailable(Bitmap bitmap, long timestampUsec, int frameCount, Statistics stats) { mRenderer.newBitmapAvailable(bitmap, timestampUsec, frameCount, stats); } - + /** * Signal that a new GL-rendered frame is available (for fake input). * This is used when rendering synthetic patterns directly with GL. @@ -275,13 +275,13 @@ public void run() { mSurfaceObject = null; // we do not need it anymore mOutputSurfaces.add(mMasterSurface); mMasterSurface.makeCurrent(); - + // Create shader program for camera input (external OES texture) mFullFrameBlit = new FullFrameRect( new Texture2dProgram(mProgramType)); mTextureId = mFullFrameBlit.createTextureObject(); mInputTexture = new SurfaceTexture(mTextureId); - + // Create shader program for bitmap input (2D texture) mBitmapBlit = new FullFrameRect( new Texture2dProgram(Texture2dProgram.ProgramType.TEXTURE_2D)); @@ -432,7 +432,7 @@ public void drawBufferSwap() { } } } - + boolean isGLPattern = false; try { mLatestTimestampNsec = timeNs; @@ -457,7 +457,7 @@ public void drawBufferSwap() { // not important } - + // For GL patterns, we've already rendered directly to surfaces // For texture/bitmap, we need to blit to all surfaces if (!isGLPattern) { @@ -467,7 +467,7 @@ public void drawBufferSwap() { // Use the appropriate blitter and texture based on input type FullFrameRect blitter = (mBitmapTextureId != -1) ? 
mBitmapBlit : mFullFrameBlit; int textureToUse = (mBitmapTextureId != -1) ? mBitmapTextureId : mTextureId; - + for (FrameswapControl surface : mOutputSurfaces) { if (surface.keepFrame()) { surface.makeCurrent(); @@ -479,7 +479,7 @@ public void drawBufferSwap() { surface.swapBuffers(); } } - + // NOW start encoding measurement - frame has been submitted to encoder! // Called ONCE per frame after all surfaces are swapped, not once per surface. // This measures only encoder time, not preparation/rendering time. @@ -492,11 +492,13 @@ public void drawBufferSwap() { synchronized (mLock) { for (FrameswapControl surface : mOutputSurfaces) { if (surface.keepFrame()) { + // Must make surface current before swap on some EGL implementations + surface.makeCurrent(); surface.setPresentationTime(mLatestTimestampNsec); surface.swapBuffers(); } } - + // NOW start encoding measurement - frame has been submitted to encoder! // Called ONCE per frame after all surfaces are swapped, not once per surface. // This measures only encoder time, not GL rendering or queuing time. 
@@ -566,14 +568,14 @@ public void drawBitmap(Bitmap bitmap) { return; } mMasterSurface.makeCurrent(); - + // Create a separate 2D texture for bitmap input (only once) if (mBitmapTextureId == -1) { int[] textures = new int[1]; GLES20.glGenTextures(1, textures, 0); mBitmapTextureId = textures[0]; Log.d(TAG, "Created 2D texture for bitmap input: " + mBitmapTextureId); - + GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, mBitmapTextureId); GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_MIN_FILTER, GLES20.GL_LINEAR); GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_MAG_FILTER, GLES20.GL_LINEAR); @@ -583,11 +585,11 @@ public void drawBitmap(Bitmap bitmap) { } else { GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, mBitmapTextureId); } - + // Load bitmap into the 2D texture GLUtils.texImage2D(GLES20.GL_TEXTURE_2D, 0, bitmap, 0); GlUtil.checkGlError("GLUtils.texImage2D"); - + // Set up transform matrix for bitmap (no transform from SurfaceTexture) Matrix.setIdentityM(mTmpMatrix, 0); Matrix.rotateM(mTmpMatrix, 0, 180, 1f, 0, 0); @@ -596,7 +598,7 @@ public void drawBitmap(Bitmap bitmap) { ex.printStackTrace(); } } - + /** * Render GL pattern directly to all surfaces - FAST PATH! * No bitmap, no texture upload, just pure GL rendering. @@ -607,7 +609,7 @@ public void renderGLPattern(FakeGLRenderer glRenderer, long timestampUs) { Log.d(TAG, "Skipping GL render after shutdown"); return; } - + // Render pattern to all surfaces synchronized (mLock) { for (FrameswapControl surface : mOutputSurfaces) { @@ -616,10 +618,10 @@ public void renderGLPattern(FakeGLRenderer glRenderer, long timestampUs) { int width = surface.getWidth(); int height = surface.getHeight(); GLES20.glViewport(0, 0, width, height); - + // Render GL pattern directly - ZERO CPU overhead! 
glRenderer.renderFrame(timestampUs); - + // No need to set transform matrix - pattern fills viewport } } @@ -669,7 +671,7 @@ public void newBitmapAvailable(Bitmap bitmap, long timestampUsec, int frameCount } } } - + /** * Render a GL pattern frame - queues the work to be done on GL thread. * This is the fast path for fake input - no bitmap overhead! @@ -682,10 +684,10 @@ public void newGLPatternFrame(FakeGLRenderer glRenderer, long timestampUsec, int frameAvailable += 1; mInputFrameLock.notifyAll(); } - + // REMOVED BLOCKING WAIT - GL rendering is async, no need to wait for frame drawn // The bitmap path needs to wait because it copies memory, but GL just queues work - + // NOTE: startEncodingFrame will be called AFTER swapBuffers() in drawBufferSwap() // to measure only the encoding time, not the GL rendering + queuing time. } diff --git a/native/x264_enc/jni/src/x264_enc.cpp b/native/x264_enc/jni/src/x264_enc.cpp index 32c849ba..80e8b9d6 100644 --- a/native/x264_enc/jni/src/x264_enc.cpp +++ b/native/x264_enc/jni/src/x264_enc.cpp @@ -32,8 +32,6 @@ using namespace std; extern "C" { x264_t *encoder = NULL; -x264_nal_t *nal; -int nnal; int _width = -1; int _height = -1; int _colorformat = -1; @@ -190,14 +188,16 @@ jint init_encoder(JNIEnv *env, jobject thiz, jobjectArray params, jint width, x264Params.i_width = _width; x264Params.i_height = _height; - // TODO: remove this, currenlty the encoder is broken without it - x264Params.i_threads = 1; x264Params.i_csp = _colorformat; x264Params.i_bitdepth = _bitdepth; x264Params.i_fps_num = 30; x264Params.i_fps_den = 1; x264Params.i_timebase_num = 1; - x264Params.i_timebase_den = 1000000; // Nanosecs + x264Params.i_timebase_den = 1000000; // Microsecs + // Output NALs in Annex B format (start codes) - MediaMuxer handles conversion + x264Params.b_annexb = 1; + // Disable repeat headers - we handle SPS/PPS separately + x264Params.b_repeat_headers = 0; LOGD("Open x264 encoder"); encoder = x264_encoder_open(&x264Params); if 
(!encoder) { @@ -219,6 +219,8 @@ jbyteArray get_header(JNIEnv *env, jobject thiz, jbyteArray headerArray) { return NULL; } + x264_nal_t *nal; + int nnal; int size_of_headers = x264_encoder_headers(encoder, &nal, &nnal); jbyte *buf = new jbyte[size_of_headers]; memset(buf, 0, size_of_headers); @@ -246,6 +248,45 @@ jbyteArray get_header(JNIEnv *env, jobject thiz, jbyteArray headerArray) { return ret; } +static int copy_nal_to_output(x264_nal_t *nal, int nnal, jbyte *output_data, + int output_size) { + // Start with 2-byte offset - required for MediaMuxer compatibility + int offset = 2; + output_data[0] = 0; + output_data[1] = 0; + + for (int i = 0; i < nnal; i++) { + // Skip header NALs - they're handled separately via get_header() + if (nal[i].i_type == NAL_SPS || nal[i].i_type == NAL_PPS || + nal[i].i_type == NAL_SEI || nal[i].i_type == NAL_AUD || + nal[i].i_type == NAL_FILLER) { + continue; + } + if (offset + nal[i].i_payload <= output_size) { + memcpy(output_data + offset, nal[i].p_payload, nal[i].i_payload); + offset += nal[i].i_payload; + } else { + LOGE("Output buffer too small for NAL unit"); + } + } + return offset; +} + +static void update_frame_info(JNIEnv *env, jobject frameInfo, + x264_picture_t *pic_out, int frame_size) { + jclass infoClass = env->FindClass("com/facebook/encapp/utils/FrameInfo"); + jfieldID isIframeId = env->GetFieldID(infoClass, "mIsIframe", "Z"); + jfieldID ptsId = env->GetFieldID(infoClass, "mPts", "J"); + jfieldID dtsId = env->GetFieldID(infoClass, "mDts", "J"); + jfieldID sizeId = env->GetFieldID(infoClass, "mSize", "J"); + + env->SetLongField(frameInfo, sizeId, frame_size); + env->SetLongField(frameInfo, ptsId, pic_out->i_pts); + env->SetLongField(frameInfo, dtsId, pic_out->i_dts); + env->SetBooleanField(frameInfo, isIframeId, pic_out->b_keyframe); +} + +// Returns frame size, 0 if buffered (B-frames), -1 on error jint encode(JNIEnv *env, jobject thiz, jbyteArray input, jbyteArray output, jobject frameInfo) { LOGD("Encoding 
frame"); @@ -255,32 +296,33 @@ jint encode(JNIEnv *env, jobject thiz, jbyteArray input, jbyteArray output, } jclass infoClass = env->FindClass("com/facebook/encapp/utils/FrameInfo"); - jfieldID isIframeId = env->GetFieldID(infoClass, "mIsIframe", "Z"); jfieldID ptsId = env->GetFieldID(infoClass, "mPts", "J"); - jfieldID dtsId = env->GetFieldID(infoClass, "mDts", "J"); - jfieldID sizeId = env->GetFieldID(infoClass, "mSize", "J"); - // All interaction must be done before locking java... + x264_nal_t *nal; + int nnal; + x264_picture_t pic_in = {0}; x264_picture_t pic_out = {0}; - // TODO: We are assuming yuv420p, add check... - int ySize = _width * _height; // Stride? + int ySize = _width * _height; int uvSize = (int)(ySize / 4.0f); + int inputSize = ySize + uvSize * 2; x264_picture_init(&pic_in); pic_in.img.i_csp = _colorformat; - // TODO: hard code, really? pic_in.img.i_plane = 3; long pts = env->GetLongField(frameInfo, ptsId); LOGD("Set pts: %ld", pts); - pic_in.i_pts = pts; // Convert to milliseconds + pic_in.i_pts = pts; - // Now we are locking java - jbyte *input_data = (jbyte *)env->GetPrimitiveArrayCritical(input, 0); - jbyte *output_data = (jbyte *)env->GetPrimitiveArrayCritical(output, 0); + jsize input_array_size = env->GetArrayLength(input); + jsize output_array_size = env->GetArrayLength(output); + // Use local buffers instead of GetPrimitiveArrayCritical to allow GC + jbyte *input_data = new jbyte[inputSize]; + jbyte *output_data = new jbyte[output_array_size]; + env->GetByteArrayRegion(input, 0, inputSize, input_data); pic_in.img.plane[0] = (uint8_t *)input_data; pic_in.img.plane[1] = (uint8_t *)(input_data + ySize); pic_in.img.plane[2] = (uint8_t *)(input_data + ySize + uvSize); @@ -290,41 +332,68 @@ jint encode(JNIEnv *env, jobject thiz, jbyteArray input, jbyteArray output, pic_in.img.i_stride[2] = _width / 2; int frame_size = x264_encoder_encode(encoder, &nal, &nnal, &pic_in, &pic_out); - if (frame_size >= 0) { - // TODO: Added total_size = 2 for 
debugging purpose - int total_size = 2; - for (int i = 0; i < nnal; i++) { - total_size += nal[i].i_payload; - } - int offset = 2; - for (int i = 0; i < nnal; i++) { - if (nal[i].i_type == NAL_SPS || nal[i].i_type == NAL_PPS || - nal[i].i_type == NAL_SEI || nal[i].i_type == NAL_AUD || - nal[i].i_type == NAL_FILLER) { - continue; - } - memcpy(output_data + offset, nal[i].p_payload, nal[i].i_payload); - offset += nal[i].i_payload; - } - frame_size = total_size; + + int total_size = 0; + if (frame_size > 0) { + total_size = copy_nal_to_output(nal, nnal, output_data, output_array_size); + env->SetByteArrayRegion(output, 0, total_size, output_data); + update_frame_info(env, frameInfo, &pic_out, total_size); + } else if (frame_size == 0) { + // Frame buffered (B-frame reordering) + LOGD("Frame buffered, no output yet (encoder delay)"); + update_frame_info(env, frameInfo, &pic_out, 0); + } else { + LOGE("x264_encoder_encode failed with error: %d", frame_size); } + delete[] input_data; + delete[] output_data; - env->ReleasePrimitiveArrayCritical(input, input_data, 0); - env->ReleasePrimitiveArrayCritical(output, output_data, 0); + return total_size; +} - // Set data from the encoding process - env->SetLongField(frameInfo, sizeId, frame_size); - env->SetLongField(frameInfo, ptsId, pic_out.i_pts); - env->SetLongField(frameInfo, dtsId, pic_out.i_dts); - env->SetBooleanField(frameInfo, isIframeId, pic_out.b_keyframe); - // Do we need additional info? - // LOGD("Not saved: Pic type: %d", pic_out.i_type); - // TODO: we also have a complete list of params. Maybe with a debug flag we - // could push this to java? - - // x264_image_properties_t holds psnr and ssim as well (potentially, if - // enabled) - return frame_size; +// Flush buffered frames. Call until returns 0. 
+jint flush_encoder(JNIEnv *env, jobject thiz, jbyteArray output, + jobject frameInfo) { + LOGD("Flushing encoder"); + if (!encoder) { + LOGI("Encoder is not initialized for flushing"); + return -1; + } + + x264_nal_t *nal; + int nnal; + x264_picture_t pic_out = {0}; + + jsize output_array_size = env->GetArrayLength(output); + jbyte *output_data = new jbyte[output_array_size]; + + // NULL input flushes buffered frames + int frame_size = + x264_encoder_encode(encoder, &nal, &nnal, NULL, &pic_out); + + int total_size = 0; + if (frame_size > 0) { + total_size = copy_nal_to_output(nal, nnal, output_data, output_array_size); + env->SetByteArrayRegion(output, 0, total_size, output_data); + update_frame_info(env, frameInfo, &pic_out, total_size); + LOGD("Flushed frame: pts=%ld, dts=%ld, size=%d", (long)pic_out.i_pts, + (long)pic_out.i_dts, total_size); + } else if (frame_size == 0) { + LOGD("Encoder flush complete, no more buffered frames"); + update_frame_info(env, frameInfo, &pic_out, 0); + } else { + LOGE("x264_encoder_encode (flush) failed with error: %d", frame_size); + } + + delete[] output_data; + return total_size; +} + +jint get_delayed_frames(JNIEnv *env, jobject thiz) { + if (!encoder) { + return 0; + } + return x264_encoder_delayed_frames(encoder); } void update_settings(JNIEnv *env, jobject thiz, jobjectArray params) { @@ -470,6 +539,25 @@ jobjectArray get_all_settings(JNIEnv *env, jobject thiz) { parameterClass, paramConstructor, env->NewStringUTF("i_timebase_den"), env->NewStringUTF("intType"), env->NewStringUTF(buffer))); + snprintf(buffer, len, "%d", info.i_threads); + params.push_back(env->NewObject( + parameterClass, paramConstructor, env->NewStringUTF("i_threads"), + env->NewStringUTF("intType"), env->NewStringUTF(buffer))); + snprintf(buffer, len, "%d", info.i_lookahead_threads); + params.push_back(env->NewObject( + parameterClass, paramConstructor, + env->NewStringUTF("i_lookahead_threads"), env->NewStringUTF("intType"), + 
env->NewStringUTF(buffer))); + snprintf(buffer, len, "%d", info.b_sliced_threads); + params.push_back(env->NewObject( + parameterClass, paramConstructor, env->NewStringUTF("b_sliced_threads"), + env->NewStringUTF("intType"), env->NewStringUTF(buffer))); + + snprintf(buffer, len, "%d", info.i_bframe); + params.push_back(env->NewObject( + parameterClass, paramConstructor, env->NewStringUTF("i_bframe"), + env->NewStringUTF("intType"), env->NewStringUTF(buffer))); + jobjectArray ret = env->NewObjectArray(params.size(), parameterClass, NULL); int index = 0; for (auto element : params) { @@ -492,6 +580,9 @@ static JNINativeMethod methods[] = { (void *)&init_encoder}, {"getHeader", "()[B", (void *)&get_header}, {"encode", "([B[BLcom/facebook/encapp/utils/FrameInfo;)I", (void *)&encode}, + {"flushEncoder", "([BLcom/facebook/encapp/utils/FrameInfo;)I", + (void *)&flush_encoder}, + {"getDelayedFrames", "()I", (void *)&get_delayed_frames}, {"close", "()V", (void *)&close}, {"getAllEncoderSettings", "()[Lcom/facebook/encapp/utils/StringParameter;", (void *)&get_all_settings}, diff --git a/scripts/encapp.py b/scripts/encapp.py index 4af5c832..70946447 100755 --- a/scripts/encapp.py +++ b/scripts/encapp.py @@ -438,7 +438,7 @@ def update_file_paths(test, device_workdir=default_values["device_workdir"]): test.configure.codec = f"{device_workdir}/{basename}" # camera tests do not need any input file paths # fake_input tests do not need any input file paths - if test.input.filepath == "camera" or test.input.filepath == "fake_input": + if test.input.filepath == "camera" or test.input.filepath.startswith("fake_input"): return # update main test basename = os.path.basename(test.input.filepath) @@ -450,14 +450,14 @@ def update_file_paths(test, device_workdir=default_values["device_workdir"]): def get_media_files(test, all_files): # TODO: remove? 
- if test.input.filepath != "camera" and test.input.filepath != "fake_input": + if test.input.filepath != "camera" and not test.input.filepath.startswith("fake_input"): name = os.path.basename(test.input.filepath) if name not in all_files: all_files.add(name) for subtest in test.parallel.test: if ( subtest.input.filepath != "camera" - and subtest.input.filepath != "fake_input" + and not subtest.input.filepath.startswith("fake_input") ): get_media_files(subtest, all_files) return @@ -469,26 +469,26 @@ def add_files_to_push(test, files_to_push): full_path = os.path.expanduser(test.configure.codec) if full_path not in files_to_push: files_to_push.add(full_path) - if test.input.filepath != "camera" and test.input.filepath != "fake_input": + if test.input.filepath != "camera" and not test.input.filepath.startswith("fake_input"): full_path = os.path.expanduser(test.input.filepath) if full_path not in files_to_push: files_to_push.add(full_path) for subtest in test.parallel.test: if ( subtest.input.filepath != "camera" - and subtest.input.filepath != "fake_input" + and not subtest.input.filepath.startswith("fake_input") ): add_files_to_push(subtest, files_to_push) return def update_media_files(test, options): - if test.input.filepath != "camera" and test.input.filepath != "fake_input": + if test.input.filepath != "camera" and not test.input.filepath.startswith("fake_input"): update_media(test, options) for subtest in test.parallel.test: if ( subtest.input.filepath != "camera" - and subtest.input.filepath != "fake_input" + and not subtest.input.filepath.startswith("fake_input") ): update_media_files(subtest, options) return @@ -2834,7 +2834,7 @@ def process_options(options): if ( videofile != "[generate]" and videofile != "camera" - and videofile != "fake_input" + and not videofile.startswith("fake_input") ): assert os.path.exists(videofile) and os.access(videofile, os.R_OK), ( f"file {videofile} does not exist" diff --git a/tests/bitrate_buffer_x264.pbtxt 
b/tests/bitrate_buffer_x264.pbtxt index 5f499185..378bf53c 100644 --- a/tests/bitrate_buffer_x264.pbtxt +++ b/tests/bitrate_buffer_x264.pbtxt @@ -11,6 +11,11 @@ test { bitrate: "500 kbps" bitrate_mode: cbr i_frame_interval: 10 + parameter { + key: "i_threads" + type: intType + value: "1" + } parameter { key: "tune" type: stringType diff --git a/tests/bitrate_buffer_x264_multithread.pbtxt b/tests/bitrate_buffer_x264_multithread.pbtxt new file mode 100644 index 00000000..5b441588 --- /dev/null +++ b/tests/bitrate_buffer_x264_multithread.pbtxt @@ -0,0 +1,30 @@ +test { + input { + filepath: "/tmp/akiyo_qcif.y4m" + } + common { + id: "bitrate_buffer_multithread" + description: "Verify x264 encoding with multi-threading and B-frames" + } + configure { + codec: "/tmp/libnativeencoder.so" + bitrate: "500 kbps" + bitrate_mode: cbr + i_frame_interval: 10 + parameter { + key: "preset" + type: stringType + value: "medium" + } + parameter { + key: "i_threads" + type: intType + value: "4" + } + parameter { + key: "i_bframe" + type: intType + value: "3" + } + } +}