diff --git a/src/avp/component.py b/src/avp/component.py
index 01d4e44..6c5e381 100644
--- a/src/avp/component.py
+++ b/src/avp/component.py
@@ -910,11 +910,12 @@ def __init__(self, parent, oldWidgetVals, modifiedVals):
# Determine if this update is mergeable
self.id_ = -1
- if len(self.modifiedVals) == 1 and self.parent.mergeUndo:
- attr, val = self.modifiedVals.popitem()
- self.id_ = sum([ord(letter) for letter in attr[-14:]])
- self.modifiedVals[attr] = val
- else:
+ if self.parent.mergeUndo:
+ if len(self.modifiedVals) == 1:
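+                # Derive a merge id from the attribute name so repeated edits
+                # to the same setting can merge into a single undo action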
+ attr, val = self.modifiedVals.popitem()
+ self.id_ = sum([ord(letter) for letter in attr[-14:]])
+ self.modifiedVals[attr] = val
+ return
log.warning(
"%s component settings changed at once. (%s)",
len(self.modifiedVals),
diff --git a/src/avp/components/image.py b/src/avp/components/image.py
index 2393611..59ce8be 100644
--- a/src/avp/components/image.py
+++ b/src/avp/components/image.py
@@ -1,29 +1,40 @@
-from PIL import Image, ImageDraw, ImageEnhance
-from PyQt6 import QtGui, QtCore, QtWidgets
+from PIL import Image, ImageOps, ImageEnhance
+from PyQt6 import QtWidgets
import os
+from copy import copy
from ..component import Component
from ..toolkit.frame import BlankFrame
+from .original import Component as Visualizer
class Component(Component):
name = "Image"
- version = "1.0.1"
+ version = "2.0.0"
def widget(self, *args):
super().widget(*args)
+
+ # cache a modified image object in case we are rendering beyond frame 1
+ self.existingImage = None
+
self.page.pushButton_image.clicked.connect(self.pickImage)
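+        # Resize modes by index: 0 = Scale, 1 = Cover, 2 = Stretch
+        # (the index is checked in update() and drawFrame())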
+ self.page.comboBox_resizeMode.addItem("Scale")
+ self.page.comboBox_resizeMode.addItem("Cover")
+ self.page.comboBox_resizeMode.addItem("Stretch")
+ self.page.comboBox_resizeMode.setCurrentIndex(0)
self.trackWidgets(
{
"imagePath": self.page.lineEdit_image,
"scale": self.page.spinBox_scale,
- "stretchScale": self.page.spinBox_scale_stretch,
"rotate": self.page.spinBox_rotate,
"color": self.page.spinBox_color,
"xPosition": self.page.spinBox_x,
"yPosition": self.page.spinBox_y,
- "stretched": self.page.checkBox_stretch,
+ "resizeMode": self.page.comboBox_resizeMode,
"mirror": self.page.checkBox_mirror,
+ "respondToAudio": self.page.checkBox_respondToAudio,
+ "sensitivity": self.page.spinBox_sensitivity,
},
presetNames={
"imagePath": "image",
@@ -33,11 +44,19 @@ def widget(self, *args):
relativeWidgets=["xPosition", "yPosition", "scale"],
)
+ def update(self):
+ self.page.spinBox_sensitivity.setEnabled(
+ self.page.checkBox_respondToAudio.isChecked()
+ )
+ self.page.spinBox_scale.setEnabled(
+ self.page.comboBox_resizeMode.currentIndex() == 0
+ )
+
def previewRender(self):
- return self.drawFrame(self.width, self.height)
+ return self.drawFrame(self.width, self.height, None)
def properties(self):
- props = ["static"]
+ props = ["pcm" if self.respondToAudio else "static"]
if not os.path.exists(self.imagePath):
props.append("error")
return props
@@ -48,34 +67,103 @@ def error(self):
if not os.path.exists(self.imagePath):
return "The image selected does not exist!"
+ def preFrameRender(self, **kwargs):
+ super().preFrameRender(**kwargs)
+ if not self.respondToAudio:
+ return
+
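+        # Fixed smoothing constants; Image exposes no smoothness setting of its own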
+        smoothConstantDown = 0.08
+        smoothConstantUp = 0.8
+ self.lastSpectrum = None
+ self.spectrumArray = {}
+
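+        # Precompute a spectrum for each sample window by reusing the
+        # Classic Visualizer's transformData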
+ for i in range(0, len(self.completeAudioArray), self.sampleSize):
+ if self.canceled:
+ break
+ self.lastSpectrum = Visualizer.transformData(
+ i,
+ self.completeAudioArray,
+ self.sampleSize,
+ smoothConstantDown,
+ smoothConstantUp,
+ self.lastSpectrum,
+ self.sensitivity,
+ )
+ self.spectrumArray[i] = copy(self.lastSpectrum)
+
+ progress = int(100 * (i / len(self.completeAudioArray)))
+ if progress >= 100:
+ progress = 100
+ pStr = "Analyzing audio: " + str(progress) + "%"
+ self.progressBarSetText.emit(pStr)
+ self.progressBarUpdate.emit(int(progress))
+
def frameRender(self, frameNo):
- return self.drawFrame(self.width, self.height)
+ return self.drawFrame(
+ self.width,
+ self.height,
+ (
+ None
+ if not self.respondToAudio
+ else self.spectrumArray[frameNo * self.sampleSize]
+ ),
+ )
- def drawFrame(self, width, height):
+ def drawFrame(self, width, height, dynamicScale):
frame = BlankFrame(width, height)
if self.imagePath and os.path.exists(self.imagePath):
- scale = self.scale if not self.stretched else self.stretchScale
- image = Image.open(self.imagePath)
-
- # Modify image's appearance
- if self.color != 100:
- image = ImageEnhance.Color(image).enhance(float(self.color / 100))
- if self.mirror:
- image = image.transpose(Image.Transpose.FLIP_LEFT_RIGHT)
- if self.stretched and image.size != (width, height):
- image = image.resize((width, height), Image.Resampling.LANCZOS)
- if scale != 100:
- newHeight = int((image.height / 100) * scale)
- newWidth = int((image.width / 100) * scale)
- image = image.resize((newWidth, newHeight), Image.Resampling.LANCZOS)
+ if self.existingImage:
+ image = self.existingImage
+ else:
+ image = Image.open(self.imagePath)
+ # Modify static image appearance
+ if self.color != 100:
+ image = ImageEnhance.Color(image).enhance(float(self.color / 100))
+ if self.mirror:
+ image = image.transpose(Image.Transpose.FLIP_LEFT_RIGHT)
+ if self.resizeMode == 1: # Cover
+ image = ImageOps.fit(
+ image, (width, height), Image.Resampling.LANCZOS
+ )
+ elif self.resizeMode == 2: # Stretch
+ image = image.resize((width, height), Image.Resampling.LANCZOS)
+ elif self.scale != 100: # Scale
+ newHeight = int((image.height / 100) * self.scale)
+ newWidth = int((image.width / 100) * self.scale)
+ image = image.resize(
+ (newWidth, newHeight), Image.Resampling.LANCZOS
+ )
+ self.existingImage = image
+
+ # Respond to audio
+ scale = 0
+ if dynamicScale is not None:
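+                # Use a single spectrum bin (index 36 * 4), softened by /4,
+                # as the per-frame size offset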
+ scale = dynamicScale[36 * 4] / 4
+ image = ImageOps.contain(
+ image,
+ (
+ image.width + int(scale / 2),
+ image.height + int(scale / 2),
+ ),
+ Image.Resampling.LANCZOS,
+ )
# Paste image at correct position
- frame.paste(image, box=(self.xPosition, self.yPosition))
+ frame.paste(
+ image,
+ box=(
+ self.xPosition - (0 if not self.respondToAudio else int(scale / 2)),
+ self.yPosition - (0 if not self.respondToAudio else int(scale / 2)),
+ ),
+ )
if self.rotate != 0:
frame = frame.rotate(self.rotate)
return frame
+ def postFrameRender(self):
+ self.existingImage = None
+
def pickImage(self):
imgDir = self.settings.value("componentDir", os.path.expanduser("~"))
filename, _ = QtWidgets.QFileDialog.getOpenFileName(
@@ -106,24 +194,3 @@ def command(self, arg):
def commandHelp(self):
print("Load an image:\n path=/filepath/to/image.png")
-
- def savePreset(self):
- # Maintain the illusion that the scale spinbox is one widget
- scaleBox = self.page.spinBox_scale
- stretchScaleBox = self.page.spinBox_scale_stretch
- if self.page.checkBox_stretch.isChecked():
- scaleBox.setValue(stretchScaleBox.value())
- else:
- stretchScaleBox.setValue(scaleBox.value())
- return super().savePreset()
-
- def update(self):
- # Maintain the illusion that the scale spinbox is one widget
- scaleBox = self.page.spinBox_scale
- stretchScaleBox = self.page.spinBox_scale_stretch
- if self.page.checkBox_stretch.isChecked():
- scaleBox.setVisible(False)
- stretchScaleBox.setVisible(True)
- else:
- scaleBox.setVisible(True)
- stretchScaleBox.setVisible(False)
diff --git a/src/avp/components/image.ui b/src/avp/components/image.ui
index 2dad127..45f3747 100644
--- a/src/avp/components/image.ui
+++ b/src/avp/components/image.ui
@@ -84,10 +84,10 @@
-
- Qt::Horizontal
+ Qt::Orientation::Horizontal
- QSizePolicy::Fixed
+ QSizePolicy::Policy::Fixed
@@ -181,26 +181,29 @@
-
-
-
-
- Stretch
+
+
+
+ 0
+ 0
+
-
- false
+
+ Resize
-
-
+
+
+ -
+
- Qt::Horizontal
-
-
- QSizePolicy::Fixed
+ Qt::Orientation::Horizontal
- 5
+ 40
20
@@ -208,25 +211,34 @@
-
+
+ Qt::LayoutDirection::RightToLeft
+
Mirror
-
-
+
+
+
+ 0
+ 0
+
+
Rotate
- Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter
+ Qt::AlignmentFlag::AlignRight|Qt::AlignmentFlag::AlignTrailing|Qt::AlignmentFlag::AlignVCenter
-
- QAbstractSpinBox::UpDownArrows
+ QAbstractSpinBox::ButtonSymbols::UpDownArrows
°
@@ -242,24 +254,12 @@
+
+
+ -
+
-
-
-
- Qt::Horizontal
-
-
- QSizePolicy::Fixed
-
-
-
- 10
- 20
-
-
-
-
- -
-
+
0
@@ -270,14 +270,14 @@
Scale
- Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter
+ Qt::AlignmentFlag::AlignRight|Qt::AlignmentFlag::AlignTrailing|Qt::AlignmentFlag::AlignVCenter
-
- QAbstractSpinBox::UpDownArrows
+ QAbstractSpinBox::ButtonSymbols::UpDownArrows
%
@@ -293,30 +293,10 @@
- -
-
-
- %
-
-
- 10
-
-
- 400
-
-
- 100
-
-
-
-
-
- -
-
-
- Qt::Horizontal
+ Qt::Orientation::Horizontal
@@ -338,14 +318,14 @@
Color
- Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter
+ Qt::AlignmentFlag::AlignRight|Qt::AlignmentFlag::AlignTrailing|Qt::AlignmentFlag::AlignVCenter
-
- QAbstractSpinBox::UpDownArrows
+ QAbstractSpinBox::ButtonSymbols::UpDownArrows
%
@@ -366,12 +346,65 @@
+ -
+
+
-
+
+
+ Qt::Orientation::Horizontal
+
+
+
+ 40
+ 20
+
+
+
+
+ -
+
+
+ Scale image in response to input audio file
+
+
+ Qt::LayoutDirection::RightToLeft
+
+
+ Respond to Audio
+
+
+ true
+
+
+ false
+
+
+ false
+
+
+
+ -
+
+
+ Sensitivity
+
+
+
+ -
+
+
+ 1
+
+
+
+
+
-
- Qt::Vertical
+ Qt::Orientation::Vertical
diff --git a/src/avp/components/original.py b/src/avp/components/original.py
index 1e7ef86..64eba4d 100644
--- a/src/avp/components/original.py
+++ b/src/avp/components/original.py
@@ -57,8 +57,8 @@ def previewRender(self):
def preFrameRender(self, **kwargs):
super().preFrameRender(**kwargs)
- self.smoothConstantDown = 0.08 + 0 if not self.smooth else self.smooth / 15
- self.smoothConstantUp = 0.8 - 0 if not self.smooth else self.smooth / 15
+ smoothConstantDown = 0.08 if not self.smooth else self.smooth / 15
+ smoothConstantUp = 0.8 if not self.smooth else self.smooth / 15
self.lastSpectrum = None
self.spectrumArray = {}
@@ -69,9 +69,10 @@ def preFrameRender(self, **kwargs):
i,
self.completeAudioArray,
self.sampleSize,
- self.smoothConstantDown,
- self.smoothConstantUp,
+ smoothConstantDown,
+ smoothConstantUp,
self.lastSpectrum,
+ self.scale,
)
self.spectrumArray[i] = copy(self.lastSpectrum)
@@ -92,14 +93,15 @@ def frameRender(self, frameNo):
self.layout,
)
+ @staticmethod
def transformData(
- self,
i,
completeAudioArray,
sampleSize,
smoothConstantDown,
smoothConstantUp,
lastSpectrum,
+ scale,
):
if len(completeAudioArray) < (i + sampleSize):
sampleSize = len(completeAudioArray) - i
@@ -117,7 +119,9 @@ def transformData(
# filter the noise away
# y[y<80] = 0
- y = self.scale * numpy.log10(y)
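+        # Ignore divide-by-zero from log10(0) locally, replacing the old
+        # global numpy.seterr call in video_thread.py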
+ with numpy.errstate(divide="ignore"):
+ y = scale * numpy.log10(y)
+
y[numpy.isinf(y)] = 0
if lastSpectrum is not None:
diff --git a/src/avp/core.py b/src/avp/core.py
index 402b532..196cd7d 100644
--- a/src/avp/core.py
+++ b/src/avp/core.py
@@ -71,7 +71,7 @@ def componentListChanged(self):
def insertComponent(self, compPos, component, loader):
"""
Creates a new component using these args:
- (compPos, component obj or moduleIndex, MWindow/Command/Core obj)
+ (compPos, component obj or moduleIndex, MWindow/Command obj)
"""
if compPos < 0 or compPos > len(self.selectedComponents):
compPos = len(self.selectedComponents)
diff --git a/src/avp/video_thread.py b/src/avp/video_thread.py
index 5d72409..967d2fe 100644
--- a/src/avp/video_thread.py
+++ b/src/avp/video_thread.py
@@ -253,18 +253,16 @@ def showPreview(self, frame):
@pyqtSlot()
def createVideo(self):
"""
- 1. Numpy is set to ignore division errors during this method
- 2. Determine length of final video
- 3. Call preFrameRender on each component
- 4. Create the main FFmpeg command
- 5. Open the out_pipe to FFmpeg process
- 6. Iterate over the audio data array and call frameRender on the components to get frames
- 7. Close the out_pipe
- 8. Call postFrameRender on each component
+ 1. Determine length of final video
+ 2. Call preFrameRender on each component
+ 3. Create the main FFmpeg command
+ 4. Open the out_pipe to FFmpeg process
+ 5. Iterate over the audio data array and call frameRender on the components to get frames
+ 6. Close the out_pipe
+ 7. Call postFrameRender on each component
"""
log.debug("Video worker received signal to createVideo")
log.debug("Video thread id: {}".format(int(QtCore.QThread.currentThreadId())))
- numpy.seterr(divide="ignore")
self.encoding.emit(True)
self.extraAudio = []
self.width = int(self.settings.value("outputWidth"))
diff --git a/tests/__init__.py b/tests/__init__.py
index d0073ef..b615681 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -1,27 +1,39 @@
-import pytest
import os
-import sys
+import numpy
+
+# core always has to be imported first
+import avp.core
+from avp.toolkit.ffmpeg import readAudioFile
+from pytest import fixture
+
+
+@fixture
+def audioData():
+ """Fixture that gives a tuple of (completeAudioArray, duration)"""
+ soundFile = getTestDataPath("test.ogg")
+ yield readAudioFile(soundFile, MockVideoWorker())
def getTestDataPath(filename):
+ """Get path to a file in the ./data directory"""
tests_dir = os.path.dirname(os.path.abspath(__file__))
return os.path.join(tests_dir, "data", filename)
-def run(logFile):
- """Run Pytest, which then imports and runs all tests in this module."""
- os.environ["PYTEST_QT_API"] = "PyQt6"
- with open(logFile, "w") as f:
- # temporarily redirect stdout to a text file so we capture pytest's output
- sys.stdout = f
- try:
- val = pytest.main(
- [
- os.path.dirname(__file__),
- "-s", # disable pytest's internal capturing of stdout etc.
- ]
- )
- finally:
- sys.stdout = sys.__stdout__
-
- return val
+class MockSignal:
+ """Pretends to be a pyqtSignal"""
+
+ def emit(self, *args):
+ pass
+
+
+class MockVideoWorker:
+ """Pretends to be a video thread worker"""
+
+ progressBarSetText = MockSignal()
+ progressBarUpdate = MockSignal()
+
+
+def imageDataSum(image):
+ """Get sum of raw data of a Pillow Image object"""
+ return numpy.asarray(image, dtype="int32").sum(dtype="int32")
diff --git a/tests/test_classic_visualizer.py b/tests/test_classic_visualizer.py
new file mode 100644
index 0000000..e301263
--- /dev/null
+++ b/tests/test_classic_visualizer.py
@@ -0,0 +1,71 @@
+from avp.command import Command
+from pytestqt import qtbot
+from pytest import fixture
+from . import audioData, MockSignal, imageDataSum
+
+
+sampleSize = 1470 # 44100 / 30 = 1470
+
+
+@fixture
+def coreWithClassicComp(qtbot):
+ """Fixture providing a Command object with Classic Visualizer component added"""
+ command = Command()
+ command.core.insertComponent(
+ 0, command.core.moduleIndexFor("Classic Visualizer"), command
+ )
+ yield command.core
+
+
+def test_comp_classic_added(coreWithClassicComp):
+ """Add Classic Visualizer to core"""
+ assert len(coreWithClassicComp.selectedComponents) == 1
+
+
+def test_comp_classic_removed(coreWithClassicComp):
+ """Remove Classic Visualizer from core"""
+ coreWithClassicComp.removeComponent(0)
+ assert len(coreWithClassicComp.selectedComponents) == 0
+
+
+def test_comp_classic_drawBars(coreWithClassicComp, audioData):
+ """Call drawBars after creating audio spectrum data manually."""
+
+ spectrumArray = {
+ 0: coreWithClassicComp.selectedComponents[0].transformData(
+ 0, audioData[0], sampleSize, 0.08, 0.8, None, 20
+ )
+ }
+ for i in range(sampleSize, len(audioData[0]), sampleSize):
+ spectrumArray[i] = coreWithClassicComp.selectedComponents[0].transformData(
+ i,
+ audioData[0],
+ sampleSize,
+ 0.08,
+ 0.8,
+ spectrumArray[i - sampleSize].copy(),
+ 20,
+ )
+ image = coreWithClassicComp.selectedComponents[0].drawBars(
+ 1920, 1080, spectrumArray[sampleSize * 4], (0, 0, 0), 0
+ )
+ assert imageDataSum(image) == 37872316
+
+
+def test_comp_classic_drawBars_using_preFrameRender(coreWithClassicComp, audioData):
+ """Call drawBars after creating audio spectrum data using preFrameRender."""
+ comp = coreWithClassicComp.selectedComponents[0]
+ comp.preFrameRender(
+ completeAudioArray=audioData[0],
+ sampleSize=sampleSize,
+ progressBarSetText=MockSignal(),
+ progressBarUpdate=MockSignal(),
+ )
+ image = comp.drawBars(
+ 1920,
+ 1080,
+ coreWithClassicComp.selectedComponents[0].spectrumArray[sampleSize * 4],
+ (0, 0, 0),
+ 0,
+ )
+ assert imageDataSum(image) == 37872316
diff --git a/tests/test_commandline_parser.py b/tests/test_commandline_parser.py
index 8b07b8c..d092072 100644
--- a/tests/test_commandline_parser.py
+++ b/tests/test_commandline_parser.py
@@ -1,37 +1,38 @@
import sys
import pytest
from avp.command import Command
+from pytestqt import qtbot
-def test_commandline_help():
+def test_commandline_help(qtbot):
command = Command()
sys.argv = ["", "--help"]
with pytest.raises(SystemExit):
command.parseArgs()
-def test_commandline_help_if_bad_args():
+def test_commandline_help_if_bad_args(qtbot):
command = Command()
sys.argv = ["", "--junk"]
with pytest.raises(SystemExit):
command.parseArgs()
-def test_commandline_launches_gui_if_verbose():
+def test_commandline_launches_gui_if_verbose(qtbot):
command = Command()
sys.argv = ["", "--verbose"]
mode = command.parseArgs()
assert mode == "GUI"
-def test_commandline_launches_gui_if_verbose_with_project():
+def test_commandline_launches_gui_if_verbose_with_project(qtbot):
command = Command()
sys.argv = ["", "test", "--verbose"]
mode = command.parseArgs()
assert mode == "GUI"
-def test_commandline_tries_to_export():
+def test_commandline_tries_to_export(qtbot):
command = Command()
didCallFunction = False
@@ -43,3 +44,13 @@ def captureFunction(*args):
command.createAudioVisualization = captureFunction
command.parseArgs()
assert didCallFunction
+
+
+def test_commandline_parses_classic_by_alias(qtbot):
+ command = Command()
+ assert command.parseCompName("original") == "Classic Visualizer"
+
+
+def test_commandline_parses_conway_by_name(qtbot):
+ command = Command()
+ assert command.parseCompName("conway") == "Conway's Game of Life"
diff --git a/tests/test_image_comp.py b/tests/test_image_comp.py
new file mode 100644
index 0000000..a4f05e1
--- /dev/null
+++ b/tests/test_image_comp.py
@@ -0,0 +1,50 @@
+from avp.command import Command
+from pytestqt import qtbot
+from pytest import fixture
+from . import audioData, MockSignal, imageDataSum, getTestDataPath
+
+
+sampleSize = 1470 # 44100 / 30 = 1470
+
+
+@fixture
+def coreWithImageComp(qtbot):
+ """Fixture providing a Command object with Image component added"""
+ command = Command()
+ command.settings.setValue("outputHeight", 1080)
+ command.settings.setValue("outputWidth", 1920)
+ command.core.insertComponent(0, command.core.moduleIndexFor("Image"), command)
+ yield command.core
+
+
+def test_comp_image_set_path(coreWithImageComp):
+    """Set imagePath of Image component"""
+ comp = coreWithImageComp.selectedComponents[0]
+ comp.imagePath = getTestDataPath("test.jpg")
+ image = comp.previewRender()
+ assert imageDataSum(image) == 463711601
+
+
+def test_comp_image_scale_50_1080p(coreWithImageComp):
+    """Image component scales the image to 50% at 1080p"""
+    comp = coreWithImageComp.selectedComponents[0]
+    comp.imagePath = getTestDataPath("test.jpg")
+    image = comp.previewRender()
+    dataSum = imageDataSum(image)
+    comp.page.spinBox_scale.setValue(50)
+    assert imageDataSum(comp.previewRender()) - dataSum / 4 < 2000
+
+
+def test_comp_image_scale_50_720p(coreWithImageComp):
+    """Image at 50% scale renders consistently when output changes to 720p"""
+    comp = coreWithImageComp.selectedComponents[0]
+    comp.imagePath = getTestDataPath("test.jpg")
+    comp.page.spinBox_scale.setValue(50)
+    image = comp.previewRender()
+    dataSum = imageDataSum(image)
+    comp.parent.settings.setValue("outputHeight", 720)
+    comp.parent.settings.setValue("outputWidth", 1280)
+    newImage = comp.previewRender()
+    assert image.width == 1920
+    assert newImage.width == 1280
+    assert imageDataSum(newImage) == dataSum
diff --git a/tests/test_mainwindow_undostack.py b/tests/test_mainwindow_undostack.py
new file mode 100644
index 0000000..1eec1ef
--- /dev/null
+++ b/tests/test_mainwindow_undostack.py
@@ -0,0 +1,73 @@
+from pytest import fixture
+from pytestqt import qtbot
+from avp.gui.mainwindow import MainWindow
+from . import getTestDataPath
+
+
+@fixture
+def window(qtbot):
+ window = MainWindow(None, None)
+ qtbot.addWidget(window)
+ window.settings.setValue("outputWidth", 1920)
+ window.settings.setValue("outputHeight", 1080)
+ yield window
+
+
+def test_undo_classic_visualizer_scale(window, qtbot):
+    """Undoing the Classic Visualizer scale setting
+    should undo multiple merged actions."""
+    window.core.insertComponent(
+        0, window.core.moduleIndexFor("Classic Visualizer"), window
+    )
+    comp = window.core.selectedComponents[0]
+    for i in range(1, 100):
+        comp.page.spinBox_scale.setValue(i)
+    assert comp.scale == 99
+    window.undoStack.undo()
+    assert comp.scale == 20
+
+
+def test_undo_image_scale(window, qtbot):
+ """Undo Image component scale setting should undo multiple merged actions."""
+ window.core.insertComponent(0, window.core.moduleIndexFor("Image"), window)
+ comp = window.core.selectedComponents[0]
+ comp.imagePath = getTestDataPath("test.jpg")
+ comp.page.spinBox_scale.setValue(100)
+ for i in range(10, 401):
+ comp.page.spinBox_scale.setValue(i)
+ assert comp.scale == 400
+ window.undoStack.undo()
+ assert comp.scale == 10
+ window.undoStack.undo()
+ assert comp.scale == 100
+
+
+def test_undo_image_resizeMode(window, qtbot):
+ window.core.insertComponent(0, window.core.moduleIndexFor("Image"), window)
+ comp = window.core.selectedComponents[0]
+ comp.page.comboBox_resizeMode.setCurrentIndex(1)
+ assert not comp.page.spinBox_scale.isEnabled()
+ window.undoStack.undo()
+ assert comp.page.spinBox_scale.isEnabled()
+
+
+def test_undo_title_text_merged(window, qtbot):
+ """Undoing title text change should undo all recent changes."""
+ window.core.insertComponent(0, window.core.moduleIndexFor("Title Text"), window)
+ comp = window.core.selectedComponents[0]
+ comp.page.lineEdit_title.setText("avp")
+ comp.page.lineEdit_title.setText("test")
+ window.undoStack.undo()
+ assert comp.title == "Text"
+
+
+def test_undo_title_text_not_merged(window, qtbot):
+ """Undoing title text change should undo up to previous different action"""
+ window.core.insertComponent(0, window.core.moduleIndexFor("Title Text"), window)
+ comp = window.core.selectedComponents[0]
+ comp.page.lineEdit_title.setText("avp")
+ comp.page.spinBox_xTextAlign.setValue(0)
+ comp.page.lineEdit_title.setText("test")
+ window.undoStack.undo()
+ assert comp.title == "avp"
diff --git a/tests/test_text_comp.py b/tests/test_text_comp.py
new file mode 100644
index 0000000..3bc0be6
--- /dev/null
+++ b/tests/test_text_comp.py
@@ -0,0 +1,32 @@
+from avp.command import Command
+from pytestqt import qtbot
+from pytest import fixture
+from . import audioData, MockSignal, imageDataSum
+
+
+@fixture
+def coreWithTextComp(qtbot):
+ """Fixture providing a Command object with Title Text component added"""
+ command = Command()
+ command.core.insertComponent(0, command.core.moduleIndexFor("Title Text"), command)
+ yield command.core
+
+
+def test_comp_text_renderFrame_resize(coreWithTextComp):
+    """Call frameRender of the Title Text component at 1080p output."""
+ comp = coreWithTextComp.selectedComponents[0]
+ comp.parent.settings.setValue("outputWidth", 1920)
+ comp.parent.settings.setValue("outputHeight", 1080)
+ comp.parent.core.updateComponent(0)
+ image = comp.frameRender(0)
+ assert imageDataSum(image) == 2957069
+
+
+def test_comp_text_renderFrame(coreWithTextComp):
+    """Call frameRender of the Title Text component at 720p output."""
+ comp = coreWithTextComp.selectedComponents[0]
+ comp.parent.settings.setValue("outputWidth", 1280)
+ comp.parent.settings.setValue("outputHeight", 720)
+ comp.parent.core.updateComponent(0)
+ image = comp.frameRender(0)
+    assert imageDataSum(image) in (1412293, 1379298)
diff --git a/tests/test_toolkit_common.py b/tests/test_toolkit_common.py
new file mode 100644
index 0000000..d903842
--- /dev/null
+++ b/tests/test_toolkit_common.py
@@ -0,0 +1,13 @@
+from pytestqt import qtbot
+from avp.command import Command
+from avp.toolkit import blockSignals
+
+
+def test_blockSignals(qtbot):
+ command = Command()
+ command.core.insertComponent(0, 0, command)
+ comp = command.core.selectedComponents[0]
+    assert not comp.page.spinBox_scale.signalsBlocked()
+    with blockSignals(comp.page.spinBox_scale):
+        assert comp.page.spinBox_scale.signalsBlocked()
+    assert not comp.page.spinBox_scale.signalsBlocked()
diff --git a/tests/test_toolkit_ffmpeg.py b/tests/test_toolkit_ffmpeg.py
new file mode 100644
index 0000000..b015470
--- /dev/null
+++ b/tests/test_toolkit_ffmpeg.py
@@ -0,0 +1,64 @@
+import pytest
+from avp.command import Command
+from avp.toolkit.ffmpeg import createFfmpegCommand
+from . import audioData
+
+
+def test_readAudioFile_data(audioData):
+ assert len(audioData[0]) == 218453
+
+
+def test_readAudioFile_duration(audioData):
+ assert audioData[1] == 3.95
+
+
+@pytest.mark.parametrize("width, height", ((1920, 1080), (1280, 720)))
+def test_createFfmpegCommand(width, height):
+ command = Command()
+ command.settings.setValue("outputWidth", width)
+ command.settings.setValue("outputHeight", height)
+ ffmpegCmd = createFfmpegCommand("test.ogg", "/tmp", command.core.selectedComponents)
+ assert ffmpegCmd == [
+ "ffmpeg",
+ "-thread_queue_size",
+ "512",
+ "-y",
+ "-f",
+ "rawvideo",
+ "-vcodec",
+ "rawvideo",
+ "-s",
+ "%sx%s" % (width, height),
+ "-pix_fmt",
+ "rgba",
+ "-r",
+ "30",
+ "-t",
+ "0.100",
+ "-an",
+ "-i",
+ "-",
+ "-t",
+ "0.100",
+ "-i",
+ "test.ogg",
+ "-map",
+ "0:v",
+ "-map",
+ "1:a",
+ "-vcodec",
+ "libx264",
+ "-acodec",
+ "aac",
+ "-b:v",
+ "2500k",
+ "-b:a",
+ "192k",
+ "-pix_fmt",
+ "yuv420p",
+ "-preset",
+ "medium",
+ "-f",
+ "mp4",
+ "/tmp",
+ ]
diff --git a/tests/test_toolkit_frame.py b/tests/test_toolkit_frame.py
new file mode 100644
index 0000000..9486227
--- /dev/null
+++ b/tests/test_toolkit_frame.py
@@ -0,0 +1,14 @@
+import numpy
+from avp.toolkit.frame import BlankFrame, FloodFrame
+
+
+def test_blank_frame():
+ """BlankFrame creates a frame of all zeros"""
+ assert numpy.asarray(BlankFrame(1920, 1080), dtype="int32").sum() == 0
+
+
+def test_flood_frame():
+ """FloodFrame given (1, 1, 1, 1) creates a frame of sum 1920 * 1080 * 4"""
+ assert numpy.asarray(FloodFrame(1920, 1080, (1, 1, 1, 1)), dtype="int32").sum() == (
+ 1920 * 1080 * 4
+ )