diff --git a/Config/electron-launcher.json b/Config/electron-launcher.json index ffda416..2952621 100644 --- a/Config/electron-launcher.json +++ b/Config/electron-launcher.json @@ -2,7 +2,7 @@ "enableDevTools": true, "devToolsWindowStartPosition": { "x": 1912, - "y": -128 + "y": -8 }, "devToolsWindowStartDimensions": { "width": 1936, @@ -10,4 +10,4 @@ }, "maximizeDevToolsWindow": true, "hideDevToolsWindowTitle": false -} +} \ No newline at end of file diff --git a/Config/eslint.json b/Config/eslint.json index 8a72011..4041946 100644 --- a/Config/eslint.json +++ b/Config/eslint.json @@ -20,6 +20,8 @@ }, "globals": { "AddonOptionsFrame": "writable", + "AudioSource": "writable", + "AudioTrack": "writable", "assertApproximatelyEquals": "writable", "assertNotApproximatelyEquals": "writable", "assertTrue": "writable", diff --git a/Core/API/WebAudio/AudioSource.js b/Core/API/WebAudio/AudioSource.js new file mode 100644 index 0000000..a9c043d --- /dev/null +++ b/Core/API/WebAudio/AudioSource.js @@ -0,0 +1,152 @@ +class AudioSource { + static ERROR_INVALID_PLAYBACK_RATE = "Cannot set negative or zero playback rate; only positive values are supported"; + static ERROR_NEGATIVE_VOLUME_GAIN = "Cannot set negative volume gain; the lowest possible value is zero"; + static ERROR_NEGATIVE_TRANSITION_TIME = "Cannot set negative time value; the lowest possible value is zero"; + static ERROR_SOUND_NOT_READY = "Cannot play before audio is decoded; wait for isReady() or use setAutoplay(true)"; + + constructor(filePath) { + this.filePath = filePath; + + // BJS doesn't expose this, so we have to store it at this level (at the risk of being wrong) + this.playbackRate = 1; + + // May be fetched from disk (TODO: It's blocking, use async?) + const resource = C_Decoding.decodeFile(filePath); + this.uniqueID = new UniqueID(); + + // BJS uses WebAudio's context.decodeAudio, which consumes the buffer, so we must copy it or the original will be empty (TODO: Test for this) + function copy(src) { + var dst = new ArrayBuffer(src.byteLength); + new Uint8Array(dst).set(new Uint8Array(src)); + return dst; + } + const buffer = copy(resource.data); + const currentScene = C_Rendering.getActiveScene(); + const sound = new BABYLON.Sound(this.getUniqueID(), buffer, currentScene); + + // Apply some sane defaults (can be overwritten at will) + sound.spatialSound = true; + sound.loop = false; + sound.autoplay = false; + sound.maxDistance = 100; + sound.distanceModel = "linear"; + + this.sound = sound; + this.useHighQualityStereo(C_Settings.getValue("useHighQualityStereo")); + } + isUsingHighQualityStereo() { + return this.isUsingHRTF; + } + useHighQualityStereo(enabledFlag) { + // TODO Ensure only boolean can be passed + if (!enabledFlag) this.sound.switchPanningModelToEqualPower(); + else this.sound.switchPanningModelToHRTF(); + + this.isUsingHRTF = enabledFlag; + } + getDistanceModel() { + return this.sound.distanceModel; + } + setDistanceModel(newDistanceModel) { + // TODO Ensure it's a valid model string + this.sound.distanceModel = newDistanceModel; + } + playWhenReady(shouldPlayOnLoad) { + this.sound.autoplay = shouldPlayOnLoad; + } + setMaxDistance(distanceInWorldUnits) { + this.sound.maxDistance = distanceInWorldUnits; + } + getMaxDistance() { + return this.sound.maxDistance; + } + setLooping(isLooping) { + return (this.sound.loop = isLooping); + } + isLooping() { + return this.sound.loop; + } + isSpatialized() { + return this.sound.spatialSound; + } + setSpatialized(isSoundSpatialized) { + this.sound.spatialSound = 
isSoundSpatialized; + } + getFilePath() { + return this.filePath; + } + startPlaying(transitionTimeInMilliseconds = 0) { + this.playWhenReady(true); // Needed if the audio is still decoding (awkward...) + this.sound.play(transitionTimeInMilliseconds / 1000); // ms to sec + } + stopPlaying(timeToFadeOutCompletelyInMilliseconds = 0) { + this.sound.stop(timeToFadeOutCompletelyInMilliseconds / 1000); // ms to sec + } + isPaused() { + return this.sound.isPaused; + } + isPlaying() { + return this.sound.isPlaying; + } + setVolume(volumeGain, transitionTimeInMilliseconds = 0) { + const usageError = "Usage: AudioSource.setVolume(Number volumeGain, Number transitionTimeInMilliseconds)"; + validateNumber(volumeGain, usageError); + validateNumber(transitionTimeInMilliseconds, usageError); + + if (volumeGain < 0) throw new RangeError(AudioSource.ERROR_NEGATIVE_VOLUME_GAIN); + if (transitionTimeInMilliseconds < 0) throw new RangeError(AudioSource.ERROR_NEGATIVE_TRANSITION_TIME); + + this.sound.setVolume(volumeGain, transitionTimeInMilliseconds / 1000); // ms to sec + } + getVolume() { + return this.sound.getVolume(); + } + pause() { + this.sound.pause(); + } + resume() { + this.sound.play(); + } + getCurrentTime() { + return this.sound.currentTime * 1000; // s to ms + } + fadeOut(timeToFadeOutCompletelyInMilliseconds = 0) { + const usageError = "Usage: AudioSource.fadeOut(Number timeToFadeOutCompletelyInMilliseconds)"; + validateNumber(timeToFadeOutCompletelyInMilliseconds, usageError); + + if (timeToFadeOutCompletelyInMilliseconds < 0) throw new RangeError(AudioSource.ERROR_NEGATIVE_TRANSITION_TIME); + this.setVolume(0, timeToFadeOutCompletelyInMilliseconds); + } + fadeIn(volumeGain = 1, transitionTimeInMilliseconds = 0) { + const usageError = "Usage: AudioSource.fadeIn(Number volumeGain, Number transitionTimeInMilliseconds)"; + validateNumber(volumeGain, usageError); + validateNumber(transitionTimeInMilliseconds, usageError); + + if (volumeGain < 0) throw new RangeError(AudioSource.ERROR_NEGATIVE_VOLUME_GAIN); + if (transitionTimeInMilliseconds < 0) throw new RangeError(AudioSource.ERROR_NEGATIVE_TRANSITION_TIME); + + this.setVolume(volumeGain, transitionTimeInMilliseconds); + } + getPlaybackRate() { + return this.playbackRate; + } + setPlaybackRate(newPlaybackRate) { + validateNumber(newPlaybackRate, "Usage: AudioSource.setPlaybackRate(Number newPlaybackRate)"); + + if (newPlaybackRate <= 0) throw new RangeError(AudioSource.ERROR_INVALID_PLAYBACK_RATE); + + this.playbackRate = newPlaybackRate; + this.sound.setPlaybackRate(newPlaybackRate); + } + getUniqueID() { + return this.uniqueID.toString(); + } + destroy() { + if (this.sound === null) return false; // Already destroyed; all operations should fail + + this.sound.dispose(); + this.sound = null; + + return true; + } +} diff --git a/Core/API/WebAudio/AudioTrack.js b/Core/API/WebAudio/AudioTrack.js new file mode 100644 index 0000000..eaa2b89 --- /dev/null +++ b/Core/API/WebAudio/AudioTrack.js @@ -0,0 +1,162 @@ +// var format = require("util").format; + +class AudioTrack { + // static ERROR_INVALID_VOICE_HANDLE = "Invalid voice handle"; + constructor() { + this.volume = 1; // TODO set for each track in C_WebAudio + this.isTrackMuted = false; + this.useHRTF = C_Settings.getValue("useHighQualityStereo"); // TODO update via options + + const currentScene = C_Rendering.getActiveScene(); + this.soundtrack = new BABYLON.SoundTrack(currentScene); + } + getNumSounds() { + return this.soundtrack.soundCollection.length; + } + getUniqueID() { + return 
this.soundtrack.id; + } + addSound(sound) { + this.soundtrack.addSound(sound); + } + removeSound(sound) { + this.soundtrack.removeSound(sound); + } + connectAnalyzer(analyzer) { + this.soundtrack.connectToAnalyser(analyzer); + } + destroy() { + this.soundtrack.dispose(); + } + setVolume(volumeGain) { + this.volume = volumeGain; + if (this.isTrackMuted) return; // Do not change the actual volume until it's unmuted + + this.soundtrack.setVolume(volumeGain); + } + isMuted() { + return this.isTrackMuted; + } + mute() { + this.isTrackMuted = true; + this.soundtrack.setVolume(0); + } + unmute() { + this.isTrackMuted = false; + this.soundtrack.setVolume(this.volume); + } + useHighQualityStereo(enabledFlag) { + if (enabledFlag) this.soundtrack.switchPanningModelToHRTF(); + else this.soundtrack.switchPanningModelToEqualPower(); + + this.useHRTF = enabledFlag; + } + isUsingHighQualityStereo() { + return this.useHRTF; + } + fadeOutStop(fadeoutTimeInMilliseconds = 500) { + this.soundtrack.soundCollection.forEach((sound) => { + DEBUG("Cleaning up audio source " + sound.name + " (playback has ended)"); + sound.setVolume(0, fadeoutTimeInMilliseconds / 1000); // ms to s + sound.stop(fadeoutTimeInMilliseconds / 1000); + + setTimeout(() => { + // Defer cleanup so the fadeout has time to finish + this.removeSound(sound); + sound.dispose(); + }, fadeoutTimeInMilliseconds); + }); + } + // getAudioSource(soundHandleID) { + // if (soundHandleID === undefined) throw new RangeError(AudioTrack.ERROR_INVALID_VOICE_HANDLE + ": " + soundHandleID); + // return this.voices[soundHandleID]; + // } + // addAudioSource(audioSource) { + // const soundHandleID = audioSource.getUniqueID(); // New index at which the source will be inserted + // audioSource.setVolume(this.volumeGain); + // // Volumes need to be synchronized or some sounds will stick out like a sore thumb + // this.voices[soundHandleID] = audioSource; + // this.soundtrack.addSound(audioSource.sound); + // audioSource.sound.onEndedObservable.add(() => { + // DEBUG("Playback for audio source " + audioSource.uniqueID + " has ended"); + // this.removeAudioSource(soundHandleID); + // }); + // this.numVoices++; + // return soundHandleID; + // } + // removeAudioSource(soundHandleID) { + // // ringbuffer, FIFO - size: music = 1, ambience = 10, sfx = 32, track.hasAudioSource to determine when it's removed? object pool? 
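+	// Editor's sketch (not part of this change): one way to realize the ring buffer / FIFO idea above is to
+	// cap each track at a fixed voice count and evict the oldest handle before inserting a new one, e.g.
+	//   if (this.voiceHandles.length >= this.maxVoices) this.removeAudioSource(this.voiceHandles.shift());
+	//   this.voiceHandles.push(soundHandleID);
+	// where voiceHandles and maxVoices are hypothetical fields (sized 1 for music, 10 for ambience, 32 for SFX, as noted above).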
+ // DEBUG(format("Removing audio source %s", soundHandleID)); + // if (soundHandleID === undefined) { + // throw new RangeError(AudioTrack.ERROR_INVALID_VOICE_HANDLE + ": " + soundHandleID); + // } + // const audioSource = this.voices[soundHandleID]; + // if (audioSource === undefined) return true; + + // audioSource.stopPlaying(); + // this.soundtrack.removeSound(audioSource.sound); + // audioSource.destroy(); + + // delete this.voices[soundHandleID]; + // this.numVoices--; + + // return audioSource; + // } + // purgeInactiveVoices() { + // for (const soundHandleID of Object.keys(this.voices)) { + // const audioSource = this.voices[soundHandleID]; + // if (!audioSource.isPlaying()) { + // DEBUG(format("Purging inactive audio source %s (%s)", soundHandleID, audioSource.getFilePath())); + // this.removeAudioSource(soundHandleID); + // } + // } + // } + // purgeAllVoices() { + // for (const soundHandleID of Object.keys(this.voices)) { + // const audioSource = this.voices[soundHandleID]; + // DEBUG(format("Purging audio source %s (%s)", soundHandleID, audioSource.getFilePath())); + // this.removeAudioSource(soundHandleID); + // } + // } + // fadeOutStop(fadeoutTimeInMilliseconds = 500) { + // for (const soundHandleID of Object.keys(this.voices)) { + // const audioSource = this.voices[soundHandleID]; + + // audioSource.fadeOut(fadeoutTimeInMilliseconds); + // audioSource.stopPlaying(fadeoutTimeInMilliseconds); + // } + // } + // fadeInStart(fadeInTimeInMilliseconds = 500) { + // for (const soundHandleID of Object.keys(this.voices)) { + // const audioSource = this.voices[soundHandleID]; + // // Can't fade in properly if it starts blasting at a higher volume immediately + // audioSource.setVolume(0); + // audioSource.startPlaying(); + // audioSource.fadeIn(this.volumeGain, fadeInTimeInMilliseconds); + // // audioSource.setVolume(this.volumeGain, timeToTransitionInMilliseconds); + // } + // } + // getVolume() { + // return this.volumeGain; + // } + // setVolume(volumeGain, timeToTransitionInMilliseconds = 0) { + // this.volumeGain = volumeGain; + // for (const soundHandleID of Object.keys(this.voices)) { + // const audioSource = this.voices[soundHandleID]; + // audioSource.setVolume(volumeGain, timeToTransitionInMilliseconds); + // } + // } + // getNumAudioSources() { + // return this.numVoices; + // } + // getNumActiveVoices() { + // let count = 0; + + // for (const soundHandleID of Object.keys(this.voices)) { + // const audioSource = this.voices[soundHandleID]; + // if (audioSource.isPlaying()) count++; + // } + + // return count; + // } +} diff --git a/Core/API/WebAudio/C_WebAudio.js b/Core/API/WebAudio/C_WebAudio.js new file mode 100644 index 0000000..ba34b11 --- /dev/null +++ b/Core/API/WebAudio/C_WebAudio.js @@ -0,0 +1,271 @@ +// supported formats: mp3, ogg? 
(query chrome/electron) + +const C_WebAudio = { + musicTrack: new AudioTrack(), + sfxTrack: new AudioTrack(), + ambienceTrack: new AudioTrack(), + // This is needed to initialize the engine (before playback can start) + // Otherwise the first playback is delayed as it also initializes the engine, and global volume is unavailable + audioContext: BABYLON.Engine.audioEngine.audioContext, + // Only mp3 and ogg are supported by default, but users (and addons) could add support for other formats + supportedFormats: { + // Note: Always use lower case extensions, as the C_Decoder interface will remove any capitalization + ogg: true, + mp3: true, + wav: true, + }, + isSoundEnabled: true, + musicFadeoutTimeInMilliseconds: 500, + // Internally, BabylonJS appears to convert negative master gain values to positive ones + // Since that seems counter-intuitive and weird, we simply disallow it + ERROR_NEGATIVE_VOLUME_GAIN: "Cannot set negative volume gain; the lowest possible value is zero", + ERROR_INVALID_TRACK_ID: "No such audio track exists", + playMusic(filePath) { + validateString(filePath, "Usage: C_WebAudio.playMusic(String filePath)"); + + if (!this.isSoundEnabled) return; + + // const musicTrack = this.tracks[Enum.AUDIO_CHANNEL_MUSIC]; + this.stopMusic(); // Only one music track can play concurrently (effectively two voices that transition) + + const resource = C_Decoding.decodeFile(filePath); + const soundID = new UniqueID().toString(); + + // BJS uses WebAudio's context.decodeAudio, which consumes the buffer, so we must copy it or the original will be empty (TODO: Test for this) + const buffer = resource.copyBuffer(); + const currentScene = C_Rendering.getActiveScene(); + const sound = new BABYLON.Sound(soundID, buffer, currentScene); + + sound.autoplay = true; + sound.loop = true; + // sound.setVolume(C_Settings.getValue("musicVolume")); + + this.musicTrack.addSound(sound); + + // We have to apply this again since the setting isn't applied to each source automatically by BJS + if (this.musicTrack.isUsingHighQualityStereo()) this.musicTrack.useHighQualityStereo(true); + + // const audioSource = new AudioSource(filePath); + // audioSource.playWhenReady(true); + + // const soundHandleID = musicTrack.addAudioSource(audioSource); + + // audioSource.setLooping(true); + // audioSource.setVolume(0); + // audioSource.startPlaying(); + // sound.setVolume(C_Settings.getValue("musicVolume"), this.musicTransitionTimeInMilliseconds / 1000); // ms to s + + return sound; + }, + playSound(filePath, trackID = Enum.AUDIO_CHANNEL_SFX, isLooping = false, volume = 1) { + validateString(filePath, "Usage: playSound(String filePath [, String trackID, Boolean allowDuplicate])"); + + if (!this.isSoundEnabled) return; + + const track = this.tracks[trackID]; + + const audioSource = new AudioSource(filePath, { loop: isLooping, autoplay: !this.isSoundEnabled, volume: volume }); + if (isLooping) audioSource.setLooping(true); + const soundHandleID = track.addAudioSource(audioSource); + + return soundHandleID; + }, + stopMusic() { + // const musicTrack = this.tracks[Enum.AUDIO_CHANNEL_MUSIC]; + // musicTrack.fadeOutStop(this.musicTransitionTimeInMilliseconds); + this.musicTrack.fadeOutStop(this.musicTransitionTimeInMilliseconds); + }, + playEffectSound(filePath) { + return this.playSound(filePath, Enum.AUDIO_CHANNEL_SFX, false, C_Settings.getValue("sfxVolume")); // SFX should never loop + }, + playAmbientSound(filePath) { + return this.playSound(filePath, Enum.AUDIO_CHANNEL_AMBIENCE, true, 
C_Settings.getValue("ambienceVolume")); // Ambient sounds should always loop + }, + stopSound(soundHandleID, trackID = Enum.AUDIO_CHANNEL_SFX) { + const track = this.tracks[trackID]; + track.removeAudioSource(soundHandleID); + }, + stopAmbientSound(soundHandleID) { + this.stopSound(soundHandleID, Enum.AUDIO_CHANNEL_AMBIENCE); + }, + stopEffectSound(soundHandleID) { + this.stopSound(soundHandleID, Enum.AUDIO_CHANNEL_SFX); + }, + getTrackInfo(trackID) { + validateString(trackID, "Usage: getTrackInfo(String trackID)"); + + if (this.tracks[trackID] === undefined) throw new RangeError(this.ERROR_INVALID_TRACK_ID + ": " + trackID); + + return this.tracks[trackID]; + }, + createTrack(trackID) { + validateString(trackID, "Usage: createTrack(String trackID)"); + + const channel = this.tracks[trackID] || new AudioTrack(trackID); + this.tracks[trackID] = channel; + + return channel; + }, + getGlobalVolume() { + return BABYLON.Engine.audioEngine.getGlobalVolume(); + }, + setGlobalVolume(volumeGain, persist = true) { + if (volumeGain < 0) throw new RangeError(this.ERROR_NEGATIVE_VOLUME_GAIN); + + // if (!persist) return; // No need to save it since it's temporary + if (persist === true) C_Settings.setValue("globalVolume", volumeGain); + if (!this.isSoundEnabled) return; + // Defer volume adjustment until sound is unmuted + BABYLON.Engine.audioEngine.setGlobalVolume(volumeGain); + }, + setTrackVolume(trackID, volumeGain, timeToTransitionInMilliseconds = 0) { + if (volumeGain < 0) throw new RangeError(this.ERROR_NEGATIVE_VOLUME_GAIN); + + validateString(trackID, "Usage: setTrackVolume(String trackID, Number volumeGain)"); + validateNumber(volumeGain, "Usage: setTrackVolume(String trackID, Number volumeGain)"); + if (this.tracks[trackID] === undefined) throw new RangeError(this.ERROR_INVALID_TRACK_ID + ": " + trackID); + + this.tracks[trackID].setVolume(volumeGain, timeToTransitionInMilliseconds); + }, + getTrackVolume(trackID) { + validateString(trackID, "Usage: getTrackVolume(String trackID)"); + if (this.tracks[trackID] === undefined) throw new RangeError(this.ERROR_INVALID_TRACK_ID + ": " + trackID); + return this.tracks[trackID].getVolume(); + }, + setMusicVolume(volumeGain, timeToTransitionInMilliseconds = 0) { + // TODO Ensure 0 - 1 scale + // this.setTrackVolume(Enum.AUDIO_CHANNEL_MUSIC, volumeGain, timeToTransitionInMilliseconds); // fade + this.musicTrack.setVolume(volumeGain); + C_Settings.setValue("musicVolume", volumeGain); + }, + getMusicVolume() { + return this.getTrackVolume(Enum.AUDIO_CHANNEL_MUSIC); + }, + setEffectsVolume(volumeGain, timeToTransitionInMilliseconds = 0) { + // TODO Ensure 0 - 1 scale + this.sfxTrack.setVolume(volumeGain); + // this.setTrackVolume(Enum.AUDIO_CHANNEL_SFX, volumeGain, timeToTransitionInMilliseconds); + C_Settings.setValue("sfxVolume", volumeGain); + }, + getEffectsVolume() { + return this.getTrackVolume(Enum.AUDIO_CHANNEL_SFX); + }, + setAmbienceVolume(volumeGain, timeToTransitionInMilliseconds = 0) { + // TODO Ensure 0 - 1 scale + this.ambienceTrack.setVolume(volumeGain); + // this.setTrackVolume(Enum.AUDIO_CHANNEL_AMBIENCE, volumeGain, timeToTransitionInMilliseconds); + C_Settings.setValue("ambienceVolume", volumeGain); + }, + getAmbienceVolume() { + return this.getTrackVolume(Enum.AUDIO_CHANNEL_AMBIENCE); + }, + isAudioContextInitialized() { + // The AudioContext interface is from a TypeScript definition in BJS; it seems we can't access it directly here + return this.audioContext.constructor.name === "AudioContext"; + }, + isAudioAvailable() { + return 
BABYLON.Engine.audioEngine.canUseWebAudio;
+	},
+	canPlayMP3() {
+		return BABYLON.Engine.audioEngine.isMP3supported;
+	},
+	canPlayOGG() {
+		return BABYLON.Engine.audioEngine.isOGGsupported;
+	},
+	canPlayWAV() {
+		// With the current audio backend, this is always true - but I guess making it explicit is not a terrible idea
+		return true;
+	},
+	getSupportedFileFormats() {
+		return this.supportedFormats;
+	},
+	isSoundDisabled() {
+		// The flag tracks whether sound is enabled, so report the inverse
+		return !this.isSoundEnabled;
+	},
+	disableSound() {
+		C_Settings.setValue("enableSound", false);
+
+		this.setGlobalVolume(0, false);
+		this.isSoundEnabled = false;
+	},
+	enableSound() {
+		C_Settings.setValue("enableSound", true);
+		this.isSoundEnabled = true;
+		this.setGlobalVolume(C_Settings.getValue("globalVolume"));
+
+		// TODO store global volume
+	},
+	// We can't apply these after the fact, since it's set at creation time for each audio source // TODO: Not true
+	useHighQualityStereo(enabledFlag) {
+		// todo assert boolean
+		C_Settings.setValue("useHighQualityStereo", enabledFlag);
+		this.musicTrack.useHighQualityStereo(enabledFlag);
+		this.sfxTrack.useHighQualityStereo(enabledFlag);
+		this.ambienceTrack.useHighQualityStereo(enabledFlag);
+		// TODO enable in each track
+	},
+	// disableHighQualityStereo() {
+	// C_Settings.setValue("useHighQualityStereo", false);
+	// TODO disable for each track
+	// },
+	setBackgroundFade(enabledFlag) {
+		C_Settings.setValue("fadeSoundInBackground", enabledFlag);
+		// TODO ViewportContainer stuff here?
+	},
+	// enableBackgroundFade() {
+	// C_Settings.setValue("fadeSoundInBackground", true);
+	// TODO ViewportContainer stuff here?
+	// },
+	setBackgroundVolume(newVolumeGain) {
+		// TODO ensure it's between 0 and 1
+		// TODO: Move event handler to SystemOptionsFrame / AudioOptionsFrame?
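+		// Editor's sketch (not part of this change): the TODO above could be satisfied with a plain clamp, e.g.
+		//   newVolumeGain = Math.min(Math.max(newVolumeGain, 0), 1);
+		// Math.min/Math.max are standard; whether out-of-range input should instead throw a RangeError is left open.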
+ C_Settings.setValue("backgroundAudioVolume", newVolumeGain); + }, + enableMusicTrack() { + C_Settings.setValue("enableMusic", true); + // this.tracks[Enum.AUDIO_CHANNEL_MUSIC].fadeInStart(); + this.musicTrack.unmute(); + // this.tracks[Enum.AUDIO_CHANNEL_MUSIC].setVolume(C_Settings.getValue("musicVolume")); + }, + disableMusicTrack() { + C_Settings.setValue("enableMusic", false); + // const musicTrack = this.tracks[Enum.AUDIO_CHANNEL_MUSIC]; + // musicTrack.setVolume(0); + this.musicTrack.mute(); + // musicTrack.fadeOutStop(); + // musicTrack.purgeAllVoices(); + }, + disableEffectsTrack() { + C_Settings.setValue("enableSFX", false); + this.sfxTrack.mute(); + // this.tracks[Enum.AUDIO_CHANNEL_SFX].setVolume(0); + }, + enableEffectsTrack() { + C_Settings.setValue("enableSFX", true); + this.sfxTrack.unmute(); + // this.tracks[Enum.AUDIO_CHANNEL_SFX].setVolume(C_Settings.getValue("sfxVolume")); + }, + disableAmbienceTrack() { + C_Settings.setValue("enableAmbientSounds", false); + this.ambienceTrack.mute(); + // this.tracks[Enum.AUDIO_CHANNEL_AMBIENCE].setVolume(0); + }, + enableAmbienceTrack() { + C_Settings.setValue("enableAmbientSounds", true); + this.ambienceTrack.unmute(); + // this.tracks[Enum.AUDIO_CHANNEL_AMBIENCE].setVolume(C_Settings.getValue("ambienceVolume")); + }, + // TODO Move to SystemOptionsFrame + updateAudioChannels() { + this.musicTrack.setVolume(C_Settings.getValue("musicVolume")); + this.sfxTrack.setVolume(C_Settings.getValue("sfxVolume")); + this.ambienceTrack.setVolume(C_Settings.getValue("ambienceVolume")); + + if (!C_Settings.getValue("enableSound")) this.disableSound(); + if (!C_Settings.getValue("enableMusic")) this.disableMusicTrack(); + if (!C_Settings.getValue("enableSFX")) this.disableEffectsTrack(); + if (!C_Settings.getValue("enableAmbientSounds")) this.disableAmbienceTrack(); + }, +}; + +C_WebAudio.updateAudioChannels(); diff --git a/Core/Classes/Resource.js b/Core/Classes/Resource.js index 96999bf..c4dad31 100644 --- a/Core/Classes/Resource.js +++ b/Core/Classes/Resource.js @@ -24,4 +24,9 @@ class Resource { isReady() { return this.state === Enum.RESOURCE_STATE_READY; } + copyBuffer() { + const newBuffer = new ArrayBuffer(this.data.byteLength); + new Uint8Array(newBuffer).set(new Uint8Array(this.data)); + return newBuffer; + } } diff --git a/Core/Enums/AudioChannels.js b/Core/Enums/AudioChannels.js new file mode 100644 index 0000000..2e4d6cb --- /dev/null +++ b/Core/Enums/AudioChannels.js @@ -0,0 +1,3 @@ +Enum.AUDIO_CHANNEL_SFX = "SFX"; +Enum.AUDIO_CHANNEL_MUSIC = "Music"; +Enum.AUDIO_CHANNEL_AMBIENCE = "Ambience"; diff --git a/Core/Initialization/start-render-thread.js b/Core/Initialization/start-render-thread.js index 4654819..6aaedc4 100644 --- a/Core/Initialization/start-render-thread.js +++ b/Core/Initialization/start-render-thread.js @@ -3,6 +3,15 @@ var format = require("util").format; function StartWebClient() { C_Profiling.startTimer("StartWebClient"); + // WebAudio Setup: Requires settings to be loaded + // We can do this here as long as the C_Decoding API was loaded first + C_Decoding.addDecoder(new BuiltinAudioDecoder()); + // Ensure stored settings are applied to any new audio source right away + C_WebAudio.setMusicVolume(C_Settings.getValue("musicVolume")); + C_WebAudio.setEffectsVolume(C_Settings.getValue("sfxVolume")); + C_WebAudio.setAmbienceVolume(C_Settings.getValue("ambienceVolume")); + C_WebAudio.setGlobalVolume(C_Settings.getValue("globalVolume")); + WebClient.initializeLocalizationTables(); L = 
C_Locales.getLocalizationTable(C_Settings.getValue("activeLocale")); WebClient.setWindowTitle(L["Loading..."]); diff --git a/Interface/Frames/SystemOptionsFrame.js b/Interface/Frames/SystemOptionsFrame.js index ce1fd56..76e1801 100644 --- a/Interface/Frames/SystemOptionsFrame.js +++ b/Interface/Frames/SystemOptionsFrame.js @@ -74,8 +74,8 @@ const SystemOptions = { this.generalSettingsGroup.fieldSet ); useHighQualityStereoCheckbox.setScript("OnClick", () => { - if (!useHighQualityStereoCheckbox.isChecked()) C_WebAudio.disableHighQualityStereo(); - else C_WebAudio.enableHighQualityStereo(); + if (!useHighQualityStereoCheckbox.isChecked()) C_WebAudio.useHighQualityStereo(false); + else C_WebAudio.useHighQualityStereo(true); }); useHighQualityStereoCheckbox.setText(L["Use High-Quality Stereo"]); useHighQualityStereoCheckbox.setChecked(C_Settings.getValue("useHighQualityStereo")); @@ -86,8 +86,8 @@ const SystemOptions = { this.generalSettingsGroup.fieldSet ); soundInBackgroundCheckbox.setScript("OnClick", () => { - if (!soundInBackgroundCheckbox.isChecked()) C_WebAudio.disableBackgroundFade(); - else C_WebAudio.enableBackgroundFade(); + if (!soundInBackgroundCheckbox.isChecked()) C_WebAudio.setBackgroundFade(false); + else C_WebAudio.setBackgroundFade(true); }); soundInBackgroundCheckbox.setText(L["Lower Volume in Background"]); soundInBackgroundCheckbox.setChecked(C_Settings.getValue("fadeSoundInBackground")); diff --git a/Interface/Frames/ViewportContainer.js b/Interface/Frames/ViewportContainer.js index b1652f6..404645a 100644 --- a/Interface/Frames/ViewportContainer.js +++ b/Interface/Frames/ViewportContainer.js @@ -1,2 +1,29 @@ // This is a container for the visible area, including render canvas and the entire UI. let ViewportContainer = new Frame("ViewportContainer"); + +ViewportContainer.handleVisibilityChange = function () { + if (document.hidden) { + C_EventSystem.triggerEvent("APPLICATION_WINDOW_HIDDEN"); + C_WebAudio.setGlobalVolume( + C_Settings.getValue("backgroundAudioVolume") * C_Settings.getValue("globalVolume"), + false + ); + } else { + C_EventSystem.triggerEvent("APPLICATION_WINDOW_VISIBLE"); + C_WebAudio.setGlobalVolume(C_Settings.getValue("globalVolume"), false); + } +}; + +ViewportContainer.onWindowVisibilityChanged = function (isNowHidden) { + if (!C_Settings.getValue("fadeSoundInBackground")) return; + + ViewportContainer.handleVisibilityChange(); +}; + +document.addEventListener( + "visibilitychange", + () => { + ViewportContainer.onWindowVisibilityChanged(document.hidden); + }, + false +); diff --git a/Interface/settings-cache.json b/Interface/settings-cache.json new file mode 100644 index 0000000..7f82d6f --- /dev/null +++ b/Interface/settings-cache.json @@ -0,0 +1,88 @@ +{ + "version": 2, + "defaultAddonLoadedState": true, + "enableLogging": true, + "enableProfiling": true, + "enableSound": true, + "useHighQualityStereo": true, + "fadeSoundInBackground": true, + "enableMusic": true, + "enableSFX": true, + "enableAmbientSounds": true, + "backgroundAudioVolume": 0.25, + "globalVolume": 1, + "musicVolume": 1, + "sfxVolume": 1, + "ambienceVolume": 1, + "defaultMacroIconPath": "Interface/Icons/QuestionMarkBlack3D.png", + "macroCachePath": "Interface/macro-cache.json", + "activeLocale": "enUS", + "showFPS": true, + "fpsUpdateIntervalInFrames": 60, + "useAntialiasing": true, + "enableAlphaChannel": true, + "optimizeSceneLookupOperations": true, + "enableFogEffect": false, + "defaultFogParameters": { + "fogMode": "LINEAR", + "minDistanceInWorldUnits": 15, + 
"maxDistanceInWorldUnits": 149, + "nearLimitInWorldUnits": 1, + "farLimitInWorldUnits": 1, + "exponentialFogDensity": 15, + "fogColorRGBA": { + "red": 200, + "green": 200, + "blue": 200, + "alpha": 255 + } + }, + "showCoordinateAxes": true, + "wireframeGeometry": true, + "defaultSceneBackgroundColorRGBA": { + "red": 123, + "green": 123, + "blue": 165, + "alpha": 255 + }, + "debugMeshRenderGroupID": 5, + "numSpriteDrawLayers": 10, + "pixelsPerWorldUnit": 32, + "worldServerURL": "ws://localhost:1734", + "messageTokenSeparatorString": "#", + "packMessages": false, + "keybindingActivationMode": "OnKeyUp", + "displayWidgetMetadata": true, + "permittedEventTriggers": { + "ADDON_LOADED": true, + "ADDON_ENABLED": true, + "ADDON_DISABLED": true, + "SCRIPT_EXECUTION_FINISHED": true, + "MAP_CHANGE": true, + "FOG_MODE_UPDATE": true, + "RENDER_LOOP_UPDATE": true, + "FPS_COUNTER_UPDATE": true, + "MESH_PICKED": true, + "POINTER_DOWN": true, + "POINTER_UP": true, + "POINTER_MOVE": true, + "POINTER_WHEEL": true, + "POINTER_PICK": true, + "POINTER_TAP": true, + "POINTER_DOUBLETAP": true, + "KEY_DOWN": true, + "KEY_UP": true, + "PLAYER_MONEY_CHANGE": true, + "PLAYER_WEIGHT_CHANGE": true, + "UNIT_HEALTH_CHANGE": true, + "UNIT_MAX_HEALTH_CHANGE": true, + "UNIT_POWER_CHANGE": true, + "UNIT_MAX_POWER_CHANGE": true, + "WEBSOCKET_INCOMING_MESSAGE": true, + "WEBSOCKET_CONNECTION": true, + "WEBSOCKET_STATUS_CHANGE": true, + "MACRO_UPDATE": true, + "APPLICATION_SHUTDOWN": true, + "FOG_MODE_UPDATED": true + } +} \ No newline at end of file diff --git a/Tests/API/C_WebAudio/AudioSource.js b/Tests/API/C_WebAudio/AudioSource.js new file mode 100644 index 0000000..22bb89c --- /dev/null +++ b/Tests/API/C_WebAudio/AudioSource.js @@ -0,0 +1,318 @@ +describe("AudioSource", () => { + const previousGlobalVolume = C_WebAudio.getGlobalVolume(); // Backup, to restore afterwards + before(() => { + C_WebAudio.setGlobalVolume(0); + }); // No need to play actual audio during these tests + + after(() => { + C_WebAudio.setGlobalVolume(previousGlobalVolume); + }); + + const testSoundFilePath = path.join(WEBCLIENT_FIXTURES_DIR, "WebAudio", "dumbo.ogg"); + const instance = new AudioSource(testSoundFilePath); + + it("should be exported into the global environment", () => { + assertEquals(typeof AudioSource.constructor, "function"); + }); + + const exportedApiSurface = [ + "getPlaybackRate", + "setPlaybackRate", + "fadeIn", + "fadeOut", + "pause", + "resume", + "getVolume", + "setVolume", + "isPlaying", + "startPlaying", + "stopPlaying", + "getFilePath", + "destroy", + "getUniqueID", + "getCurrentTime", + ]; + + exportedApiSurface.forEach((namedExport) => { + it("should export function " + namedExport, () => { + assertEquals(typeof instance[namedExport], "function"); + }); + }); + + describe("getFilePath", () => { + it("should return the resource URL that was used to create the audio source", () => { + assertEquals(instance.getFilePath(), testSoundFilePath); + }); + + it("should always return an absolute path", () => { + const path = require("path"); + assertTrue(path.isAbsolute(instance.getFilePath())); + }); + }); + + describe("isPlaying", () => { + const instance = new AudioSource(testSoundFilePath); + it("should return false before playback has started", () => { + assertFalse(instance.isPlaying()); + }); + + // it("should return true after playback has started", (done) => { + // const someAudioSource = new AudioSource(testSoundFilePath); + // someAudioSource.startPlaying(); + // setTimeout(() => { + // 
assertTrue(someAudioSource.isPlaying()); + // done(); + // }, 250); // Give BJS some time to update the sound (for some reason it takes forever here...?) + // }); + + // it("should return false while playback is paused", (done) => { + // instance.pause(); + // setTimeout(() => { + // assertFalse(instance.isPlaying()); + // done(); + // }, 50); // Give BJS some time to update the sound + // }); + + // it("should return true after playback has resumed", (done) => { + // instance.resume(); + // setTimeout(() => { + // assertTrue(instance.isPlaying()); + // done(); + // }, 50); // Give BJS some time to update the sound + // }); + + // it("should return false after playback was stopped", (done) => { + // instance.stopPlaying(); + // setTimeout(() => { + // assertFalse(instance.isPlaying()); + // done(); + // }, 50); // Give BJS some time to update the sound + // }); + }); + describe("getVolume", () => { + it("should return the updated volume gain after it has been changed", () => { + // Setup + const previousVolume = instance.getVolume(); + + instance.setVolume(0.123); + assertEquals(instance.getVolume(), 0.123); + + // Teardown + instance.setVolume(previousVolume); + }); + }); + + describe("setVolume", () => { + it("should be able to adjust the volume if a valid volume gain was passed", () => {}); + + const errorMessage = "Usage: AudioSource.setVolume(Number volumeGain, Number transitionTimeInMilliseconds)"; + const typeError = new TypeError(errorMessage); + it("should throw a TypeError if the volume gain was of an invalid type", () => { + assertThrows(() => instance.setVolume(null), typeError); + assertThrows(() => instance.setVolume("hi"), typeError); + assertThrows(() => instance.setVolume(() => console.log("test")), typeError); + assertThrows(() => instance.setVolume({}), typeError); + assertThrows(() => instance.setVolume([]), typeError); + }); + + it("should throw a RangeError if the volume gain is negative", () => { + const rangeError = new RangeError(AudioSource.ERROR_NEGATIVE_VOLUME_GAIN); + assertThrows(() => instance.setVolume(-1), rangeError); + }); + + it("should throw a TypeError if the transition time was of an invalid type", () => { + assertThrows(() => instance.setVolume(1, null), typeError); + assertThrows(() => instance.setVolume(1, "hi"), typeError); + assertThrows(() => instance.setVolume(1, () => console.log("test")), typeError); + assertThrows(() => instance.setVolume(1, {}), typeError); + assertThrows(() => instance.setVolume(1, []), typeError); + }); + + it("should throw a RangeError if the transition time is negative", () => { + const rangeError = new RangeError(AudioSource.ERROR_NEGATIVE_TRANSITION_TIME); + assertThrows(() => instance.setVolume(1, -1), rangeError); + }); + }); + + describe("getCurrentTime", () => { + const anotherInstance = new AudioSource(testSoundFilePath); + + it("should start at zero when the audio source was initialized", () => { + assertEquals(anotherInstance.getCurrentTime(), 0); + }); + + // it("should advance when the audio source has started playing", (done) => { + // anotherInstance.startPlaying(); + // setTimeout(() => { + // // Due to how unreliable JavaScript timers are, we can't be more precise here + // assertTrue(anotherInstance.getCurrentTime() > 0); + // anotherInstance.stopPlaying(); + // done(); + // }, 50); // Give BJS some time to update the sound + // }); + + // it("should advance when the audio source has been resumed after pausing it", (done) => { + // // This also covers pause and resume, respectively, at least as far as 
possible
+		// 	let previousTime = anotherInstance.getCurrentTime();
+		// 	anotherInstance.startPlaying();
+		// 	setTimeout(() => {
+		// 		// Due to how unreliable JavaScript timers are, we can't be more precise here
+		// 		assertTrue(anotherInstance.getCurrentTime() > previousTime);
+		// 		previousTime = anotherInstance.getCurrentTime();
+		// 		anotherInstance.pause();
+		// 	}, 50);
+
+		// 	setTimeout(() => {
+		// 		// It should not have advanced here
+		// 		assertEquals(anotherInstance.getCurrentTime(), previousTime);
+		// 		previousTime = anotherInstance.getCurrentTime();
+		// 		anotherInstance.resume();
+		// 	}, 100);
+
+		// 	setTimeout(() => {
+		// 		// It should have advanced here once again
+		// 		assertTrue(anotherInstance.getCurrentTime() > previousTime);
+		// 		anotherInstance.stopPlaying();
+		// 		done();
+		// 	}, 150);
+		// });
+	});
+
+	describe("setPlaybackRate", () => {
+		// Also covers getPlaybackRate, coincidentally
+		it("should be able to adjust the playback rate", () => {
+			instance.setPlaybackRate(0.15);
+			const newPlaybackRate = instance.getPlaybackRate();
+			assertEquals(newPlaybackRate, 0.15);
+		});
+
+		const rangeError = new RangeError(AudioSource.ERROR_INVALID_PLAYBACK_RATE);
+		it("should throw a RangeError if the playback rate is negative", () => {
+			assertThrows(() => instance.setPlaybackRate(-1), rangeError);
+		});
+
+		it("should throw a RangeError if the playback rate is zero", () => {
+			assertThrows(() => instance.setPlaybackRate(0), rangeError);
+		});
+
+		const typeError = new TypeError("Usage: AudioSource.setPlaybackRate(Number newPlaybackRate)");
+		it("should throw a TypeError if the playback rate is not a valid Number", () => {
+			assertThrows(() => instance.setPlaybackRate(null), typeError);
+			assertThrows(() => instance.setPlaybackRate(undefined), typeError);
+			assertThrows(() => instance.setPlaybackRate("Hi"), typeError);
+			assertThrows(() => instance.setPlaybackRate([]), typeError);
+			assertThrows(() => instance.setPlaybackRate({}), typeError);
+			assertThrows(() => instance.setPlaybackRate(() => console.log("test")), typeError);
+			assertThrows(() => instance.setPlaybackRate(NaN), typeError);
+		});
+	});
+
+	describe("fadeIn", () => {
+		// it("should fade the volume to the desired level over time", (done) => {
+		// 	// This test can't be reversed, as BJS adjusts the volume setting instantly but doesn't expose the transitional values...
+		// 	const volumeGain = 0.25;
+		// 	const fadeInTimeInMilliseconds = 50;
+		// 	instance.setVolume(0);
+		// 	instance.fadeIn(volumeGain, fadeInTimeInMilliseconds);
+		// 	setTimeout(() => {
+		// 		assertEquals(instance.getVolume(), volumeGain);
+		// 		done();
+		// 	}, fadeInTimeInMilliseconds);
+		// });
+
+		it("should apply volume changes instantly if the fade time is zero", () => {
+			// This, too, is unreliable because the volume change is always saved instantly by BJS...
+ const volumeGain = 0.35; + const fadeInTimeInMilliseconds = 0; + instance.fadeIn(volumeGain, fadeInTimeInMilliseconds); + assertEquals(instance.getVolume(), volumeGain); + }); + + const typeError = new TypeError( + "Usage: AudioSource.fadeIn(Number volumeGain, Number transitionTimeInMilliseconds)" + ); + it("should throw a TypeError if either of the parameters isn't a Number or undefined", () => { + assertThrows(() => instance.fadeIn(null), typeError); + assertThrows(() => instance.fadeIn(1, null), typeError); + assertThrows(() => instance.fadeIn("hi", 1), typeError); + assertThrows(() => instance.fadeIn(1, "hi"), typeError); + assertThrows(() => instance.fadeIn(() => console.log("test"), 1), typeError); + assertThrows(() => instance.fadeIn(1, () => console.log("test")), typeError); + assertThrows(() => instance.fadeIn({}, 1), typeError); + assertThrows(() => instance.fadeIn(1, {}), typeError); + assertThrows(() => instance.fadeIn([], 1), typeError); + assertThrows(() => instance.fadeIn(1, []), typeError); + }); + + it("should throw a RangeError if the volume level is negative", () => { + const rangeError = new RangeError(AudioSource.ERROR_NEGATIVE_VOLUME_GAIN); + assertThrows(() => instance.fadeIn(-1), rangeError); + }); + + it("should throw a RangeError if the fade-in time is negative", () => { + const rangeError = new RangeError(AudioSource.ERROR_NEGATIVE_TRANSITION_TIME); + assertThrows(() => instance.fadeIn(1, -42), rangeError); + }); + }); + + describe("fadeOut", () => { + // it("should fade the volume out over time", (done) => { + // // This test can't be reversed, as BJS adjusts the volume setting instantly but doesn't expose the transitional values... + // const fadeOutTimeInMilliseconds = 50; + // instance.setVolume(0.5); + // instance.fadeOut(fadeOutTimeInMilliseconds); + // setTimeout(() => { + // assertEquals(instance.getVolume(), 0); + // done(); + // }, fadeOutTimeInMilliseconds); + // }); + + it("should apply volume changes instantly if the fade time is zero", () => { + // This, too, is unreliable because the volume change is always saved instantly by BJS... + const fadeOutTimeInMilliseconds = 0; + instance.fadeOut(fadeOutTimeInMilliseconds); + assertEquals(instance.getVolume(), 0); + }); + + const typeError = new TypeError("Usage: AudioSource.fadeOut(Number timeToFadeOutCompletelyInMilliseconds)"); + it("should throw a TypeError if the fade-out time isn't a Number or undefined", () => { + assertThrows(() => instance.fadeOut(null), typeError); + assertThrows(() => instance.fadeOut("hi"), typeError); + assertThrows(() => instance.fadeOut(() => console.log("test")), typeError); + assertThrows(() => instance.fadeOut({}), typeError); + assertThrows(() => instance.fadeOut([]), typeError); + }); + + it("should throw a RangeError if the fade-out time is negative", () => { + const rangeError = new RangeError(AudioSource.ERROR_NEGATIVE_TRANSITION_TIME); + assertThrows(() => instance.fadeOut(-42), rangeError); + }); + }); + + describe("getUniqueID", () => { + it("should return a string identifier", () => { + // We can't guarantee it will be unique, just that it will be extremely unlikely to not be unique... 
+ const guid = instance.getUniqueID(); + assertEquals(typeof guid, "string"); + }); + + it("should return the same identifier if called repeatedly", () => { + const guid1 = instance.getUniqueID(); + const guid2 = instance.getUniqueID(); + const guid3 = instance.getUniqueID(); + + assertEquals(guid1, guid2); + assertEquals(guid1, guid3); + }); + }); + + describe("destroy", () => { + it("should return true when successfully disposing of the audio buffer", () => { + assertTrue(instance.destroy()); + }); + + it("should return false when the audio buffer has already been disposed of", () => { + assertFalse(instance.destroy()); + }); + }); +}); diff --git a/Tests/API/C_WebAudio/BuiltinAudioDecoder.js b/Tests/API/C_WebAudio/BuiltinAudioDecoder.js index b6a6207..1f2a557 100644 --- a/Tests/API/C_WebAudio/BuiltinAudioDecoder.js +++ b/Tests/API/C_WebAudio/BuiltinAudioDecoder.js @@ -4,7 +4,7 @@ describe("BuiltinAudioDecoder", () => { assertEquals(decoder.constructor.name, "BuiltinAudioDecoder"); }); - let exportedApiSurface = ["getSupportedFileTypes", "decode"]; + const exportedApiSurface = ["getSupportedFileTypes", "decode"]; exportedApiSurface.forEach((namedExport) => { it("should export function " + namedExport, () => { diff --git a/Tests/API/C_WebAudio/canPlayMP3.js b/Tests/API/C_WebAudio/canPlayMP3.js new file mode 100644 index 0000000..eb5e01a --- /dev/null +++ b/Tests/API/C_WebAudio/canPlayMP3.js @@ -0,0 +1,11 @@ +describe("canPlayMP3", () => { + it("should be exported as part of the API surface", () => { + assertEquals(typeof C_WebAudio.canPlayMP3, "function"); + }); + + it("should return true after the audio engine was initialized", () => { + // This may only fail if the audio engine isn't yet initialized or some WebAudio issues occur + // It's probably best to just permanently test it to be sure it always work + assertTrue(C_WebAudio.canPlayMP3()); + }); +}); diff --git a/Tests/API/C_WebAudio/canPlayOGG.js b/Tests/API/C_WebAudio/canPlayOGG.js new file mode 100644 index 0000000..fe12536 --- /dev/null +++ b/Tests/API/C_WebAudio/canPlayOGG.js @@ -0,0 +1,11 @@ +describe("canPlayOGG", () => { + it("should be exported as part of the API surface", () => { + assertEquals(typeof C_WebAudio.canPlayOGG, "function"); + }); + + it("should return true after the audio engine was initialized", () => { + // This may only fail if the audio engine isn't yet initialized or some WebAudio issues occur + // It's probably best to just permanently test it to be sure it always work + assertTrue(C_WebAudio.canPlayOGG()); + }); +}); diff --git a/Tests/API/C_WebAudio/canPlayWAV.js b/Tests/API/C_WebAudio/canPlayWAV.js new file mode 100644 index 0000000..2bfe63a --- /dev/null +++ b/Tests/API/C_WebAudio/canPlayWAV.js @@ -0,0 +1,11 @@ +describe("canPlayWAV", () => { + it("should be exported as part of the API surface", () => { + assertEquals(typeof C_WebAudio.canPlayWAV, "function"); + }); + + it("should return true after the audio engine was initialized", () => { + // This may only fail if the audio engine isn't yet initialized or some WebAudio issues occur + // It's probably best to just permanently test it to be sure it always work + assertTrue(C_WebAudio.canPlayWAV()); + }); +}); diff --git a/Tests/API/C_WebAudio/createTrack.js b/Tests/API/C_WebAudio/createTrack.js new file mode 100644 index 0000000..ba8689d --- /dev/null +++ b/Tests/API/C_WebAudio/createTrack.js @@ -0,0 +1,51 @@ +describe("createTrack", () => { + it("should be exported as part of the API surface", () => { + assertEquals(typeof C_WebAudio.createTrack, 
"function"); + }); + it("should create a new audio channel if the track ID is still available", () => { + const trackID = ""; + const expectedError = new RangeError(C_WebAudio.ERROR_INVALID_TRACK_ID + ": " + trackID); + assertThrows(() => C_WebAudio.getTrackInfo(trackID), expectedError); // Just making sure + + const channel = C_WebAudio.createTrack(trackID); + assertTypeOf(channel, "AudioTrack"); + }); + + const expectedErrorMessage = "Usage: createTrack(String trackID)"; + const typeError = new TypeError(expectedErrorMessage); + it("should throw a TypeError if no track ID was passed", () => { + assertThrows(() => C_WebAudio.createTrack(), typeError); + }); + + it("should throw a TypeError if the track ID is not a String", () => { + // TODO: DRY, move to fixtures + const invalidtrackIDs = [ + 42, + [42], + { 42: 42 }, + () => { + let there = "peace"; + }, + C_WebAudio, + ]; + + invalidtrackIDs.forEach((invalidtrackID) => { + assertThrows(() => C_WebAudio.createTrack(invalidtrackID), typeError); + }); + }); + + it("should return the existing channel if the track ID is already in use", () => { + const trackID = ""; + const expectedError = new RangeError(C_WebAudio.ERROR_INVALID_TRACK_ID + ": " + trackID); + assertThrows(() => C_WebAudio.getTrackInfo(trackID), expectedError); // Just making sure + + const channel = C_WebAudio.createTrack(trackID); + assertTypeOf(channel, "AudioTrack"); + + const shouldBeTheSameChannel = C_WebAudio.createTrack(trackID); + assertTypeOf(shouldBeTheSameChannel, "AudioTrack"); + assertDeepEquals(shouldBeTheSameChannel, channel); + }); +}); + +// TODO: Cleanup - remove all the channels diff --git a/Tests/API/C_WebAudio/getAmbienceVolume.js b/Tests/API/C_WebAudio/getAmbienceVolume.js new file mode 100644 index 0000000..be16a65 --- /dev/null +++ b/Tests/API/C_WebAudio/getAmbienceVolume.js @@ -0,0 +1,24 @@ +describe("getAmbienceVolume", () => { + beforeEach(() => (C_WebAudio.originalVolumeGain = C_WebAudio.getAmbienceVolume())); + + it("should be exported as part of the API surface", () => { + assertEquals(typeof C_WebAudio.getAmbienceVolume, "function"); // tbd rename master to global? + }); + + it("should be able to retrieve the volume of the Ambience track", () => { + const newVolumeGain = 0.5; + + // Just to be safe (it's unlikely to ever trigger) + assertNotApproximatelyEquals(C_WebAudio.getAmbienceVolume(), newVolumeGain); + + C_WebAudio.setAmbienceVolume(newVolumeGain); + assertEquals(C_WebAudio.getAmbienceVolume(), newVolumeGain); + }); + + // Cleanup: Restore the previous gain level so as to not mess up other tests + afterEach(() => { + C_WebAudio.setAmbienceVolume(C_WebAudio.originalVolumeGain); + }); + + after(() => delete C_WebAudio.originalVolumeGain); +}); diff --git a/Tests/API/C_WebAudio/getEffectsVolume.js b/Tests/API/C_WebAudio/getEffectsVolume.js new file mode 100644 index 0000000..c99ddb2 --- /dev/null +++ b/Tests/API/C_WebAudio/getEffectsVolume.js @@ -0,0 +1,24 @@ +describe("getEffectsVolume", () => { + beforeEach(() => (C_WebAudio.originalVolumeGain = C_WebAudio.getEffectsVolume())); + + it("should be exported as part of the API surface", () => { + assertEquals(typeof C_WebAudio.getEffectsVolume, "function"); // tbd rename master to global? 
+ }); + + it("should be able to retrieve the volume of the SFX track", () => { + const newVolumeGain = 0.5; + + // Just to be safe (it's unlikely to ever trigger) + assertNotApproximatelyEquals(C_WebAudio.getEffectsVolume(), newVolumeGain); + + C_WebAudio.setEffectsVolume(newVolumeGain); + assertEquals(C_WebAudio.getEffectsVolume(), newVolumeGain); + }); + + // Cleanup: Restore the previous gain level so as to not mess up other tests + afterEach(() => { + C_WebAudio.setEffectsVolume(C_WebAudio.originalVolumeGain); + }); + + after(() => delete C_WebAudio.originalVolumeGain); +}); diff --git a/Tests/API/C_WebAudio/getGlobalVolume.js b/Tests/API/C_WebAudio/getGlobalVolume.js new file mode 100644 index 0000000..33cf87f --- /dev/null +++ b/Tests/API/C_WebAudio/getGlobalVolume.js @@ -0,0 +1,24 @@ +describe("getGlobalVolume", () => { + beforeEach(() => (C_WebAudio.originalVolumeGain = C_WebAudio.getGlobalVolume())); + + it("should be exported as part of the API surface", () => { + assertEquals(typeof C_WebAudio.getGlobalVolume, "function"); // tbd rename master to global? + }); + + it("should be able to retrieve the global volume of the audio engine", () => { + const newVolumeGain = 0.5; + + // Just to be safe (it's unlikely to ever trigger) + assertNotApproximatelyEquals(C_WebAudio.getGlobalVolume(), newVolumeGain); + + C_WebAudio.setGlobalVolume(newVolumeGain); + assertEquals(C_WebAudio.getGlobalVolume(), newVolumeGain); + }); + + // Cleanup: Restore the previous gain level so as to not mess up other tests + afterEach(() => { + C_WebAudio.setGlobalVolume(C_WebAudio.originalVolumeGain); + }); + + after(() => delete C_WebAudio.originalVolumeGain); +}); diff --git a/Tests/API/C_WebAudio/getMusicVolume.js b/Tests/API/C_WebAudio/getMusicVolume.js new file mode 100644 index 0000000..5885a57 --- /dev/null +++ b/Tests/API/C_WebAudio/getMusicVolume.js @@ -0,0 +1,24 @@ +describe("getMusicVolume", () => { + beforeEach(() => (C_WebAudio.originalVolumeGain = C_WebAudio.getMusicVolume())); + + it("should be exported as part of the API surface", () => { + assertEquals(typeof C_WebAudio.getMusicVolume, "function"); // tbd rename master to global? 
+ }); + + it("should be able to retrieve the volume of the Music track", () => { + const newVolumeGain = 0.5; + + // Just to be safe (it's unlikely to ever trigger) + assertNotApproximatelyEquals(C_WebAudio.getMusicVolume(), newVolumeGain); + + C_WebAudio.setMusicVolume(newVolumeGain); + assertEquals(C_WebAudio.getMusicVolume(), newVolumeGain); + }); + + // Cleanup: Restore the previous gain level so as to not mess up other tests + afterEach(() => { + C_WebAudio.setMusicVolume(C_WebAudio.originalVolumeGain); + }); + + after(() => delete C_WebAudio.originalVolumeGain); +}); diff --git a/Tests/API/C_WebAudio/getSupportedFileFormats.js b/Tests/API/C_WebAudio/getSupportedFileFormats.js new file mode 100644 index 0000000..3703a5d --- /dev/null +++ b/Tests/API/C_WebAudio/getSupportedFileFormats.js @@ -0,0 +1,23 @@ +describe("getSupportedFileFormats", () => { + it("should be exported as part of the API surface", () => { + assertEquals(typeof C_WebAudio.getSupportedFileFormats, "function"); + }); + + const supportedFormats = C_WebAudio.getSupportedFileFormats(); + it("should indicate that MP3 is a supported file format", () => { + // This may only fail if the audio engine isn't yet initialized or some WebAudio issues occur + // It's probably best to just permanently test it to be sure it always work + assertTrue(supportedFormats["mp3"]); + }); + + it("should indicate that OGG Vorbis is a supported file format", () => { + // This may only fail if the audio engine isn't yet initialized or some WebAudio issues occur + // It's probably best to just permanently test it to be sure it always work + assertTrue(supportedFormats["ogg"]); + }); + + it("should indicate that WAV is a supported file format", () => { + // This should always be true, as waveform (WAV) is the most basic format... but you never know? 
+ assertTrue(supportedFormats["wav"]); + }); +}); diff --git a/Tests/API/C_WebAudio/getTrackInfo.js b/Tests/API/C_WebAudio/getTrackInfo.js new file mode 100644 index 0000000..975ad97 --- /dev/null +++ b/Tests/API/C_WebAudio/getTrackInfo.js @@ -0,0 +1,22 @@ +describe("getTrackInfo", () => { + it("should throw a TypeError if no track ID was passed", () => { + const expectedError = new TypeError("Usage: getTrackInfo(String trackID)"); + assertThrows(() => C_WebAudio.getTrackInfo(), expectedError); + }); + + it("should throw a TypeError if no track ID was passed", () => { + const expectedError = new TypeError("Usage: getTrackInfo(String trackID)"); + assertThrows(() => C_WebAudio.getTrackInfo(), expectedError); + }); + + it("should throw a RangeError if an invalid track ID was passed", () => { + const trackID = "doesNotExist"; + const expectedError = new RangeError(C_WebAudio.ERROR_INVALID_TRACK_ID + ": " + trackID); + + assertThrows(() => C_WebAudio.getTrackInfo(trackID), expectedError); + }); + + // valid track, get AudioTrack instance, same as before +}); + +// TODO Review setup/teardown, should be stateless diff --git a/Tests/API/C_WebAudio/getTrackVolume.js b/Tests/API/C_WebAudio/getTrackVolume.js new file mode 100644 index 0000000..81c6757 --- /dev/null +++ b/Tests/API/C_WebAudio/getTrackVolume.js @@ -0,0 +1,19 @@ +describe("getTrackVolume", () => { + it("should be exported as part of the API surface", () => { + assertEquals(typeof C_WebAudio.setTrackVolume, "function"); + }); + + const expectedErrorMessage = "Usage: getTrackVolume(String trackID)"; + const typeError = new TypeError(expectedErrorMessage); + it("should throw a TypeError if no track ID was passed", () => { + assertThrows(() => C_WebAudio.getTrackVolume(), typeError); + }); + + it("should throw a RangeError if an invalid track ID was passed", () => { + const invalidTrackID = "12q4rrwdftyu"; + const expectedError = RangeError(C_WebAudio.ERROR_INVALID_TRACK_ID + ": " + invalidTrackID); + + assertThrows(() => C_WebAudio.getTrackInfo(invalidTrackID), expectedError); // Just making sure + assertThrows(() => C_WebAudio.getTrackVolume(invalidTrackID), expectedError); + }); +}); diff --git a/Tests/API/C_WebAudio/isAudioAvailable.js b/Tests/API/C_WebAudio/isAudioAvailable.js new file mode 100644 index 0000000..351e1a8 --- /dev/null +++ b/Tests/API/C_WebAudio/isAudioAvailable.js @@ -0,0 +1,11 @@ +describe("isAudioAvailable", () => { + it("should be exported as part of the API surface", () => { + assertEquals(typeof C_WebAudio.isAudioAvailable, "function"); + }); + + it("should return true after the audio engine was initialized", () => { + // This may only fail if the audio engine isn't yet initialized or some WebAudio issues occur + // It's probably best to just permanently test it to be sure it always work + assertTrue(C_WebAudio.isAudioAvailable()); + }); +}); diff --git a/Tests/API/C_WebAudio/isAudioContextInitialized.js b/Tests/API/C_WebAudio/isAudioContextInitialized.js new file mode 100644 index 0000000..6069f8d --- /dev/null +++ b/Tests/API/C_WebAudio/isAudioContextInitialized.js @@ -0,0 +1,11 @@ +describe("isAudioContextInitialized", () => { + it("should be exported as part of the API surface", () => { + assertEquals(typeof C_WebAudio.isAudioContextInitialized, "function"); + }); + + it("should return true after the audio engine was initialized", () => { + // This may only fail if the audio engine isn't yet initialized or some WebAudio issues occur + // It's probably best to just permanently test it to be sure it always work 
+ assertTrue(C_WebAudio.isAudioContextInitialized()); + }); +}); diff --git a/Tests/API/C_WebAudio/playAmbientSound.js b/Tests/API/C_WebAudio/playAmbientSound.js new file mode 100644 index 0000000..3a8486a --- /dev/null +++ b/Tests/API/C_WebAudio/playAmbientSound.js @@ -0,0 +1,5 @@ +describe("playSound", () => { + it("should be exported as part of the API surface", () => { + assertEquals(typeof C_WebAudio.playSound, "function"); + }); +}); diff --git a/Tests/API/C_WebAudio/playMusic.js b/Tests/API/C_WebAudio/playMusic.js new file mode 100644 index 0000000..9c42845 --- /dev/null +++ b/Tests/API/C_WebAudio/playMusic.js @@ -0,0 +1,129 @@ +describe("playMusic", () => { + // beforeEach(() => C_WebAudio.getTrackInfo(Enum.AUDIO_CHANNEL_MUSIC).purgeAllVoices()); + afterEach(() => C_WebAudio.getTrackInfo(Enum.AUDIO_CHANNEL_MUSIC).purgeAllVoices()); + + // We have to wait for the audio engine to load files from disk and start playback... This test is sketchy :( + const ANTICIPATED_IO_DELAY = 150; // Not sure if this works on the CI runner, will have to see... + + const path = require("path"); + const testSoundFilePath = path.join(WEBCLIENT_FIXTURES_DIR, "WebAudio", "dumbo.ogg"); + const anotherSoundFilePath = path.join(WEBCLIENT_FIXTURES_DIR, "WebAudio", "dumbo2.ogg"); + + it("should be exported as part of the API surface", () => { + assertEquals(typeof C_WebAudio.playMusic, "function"); + }); + + it("should be able to play music if a valid file path was passed", () => { + const musicTrack = C_WebAudio.getTrackInfo(Enum.AUDIO_CHANNEL_MUSIC); + + const validFilePath = testSoundFilePath; + assertEquals(musicTrack.getNumAudioSources(), 0); + const soundHandleID = C_WebAudio.playMusic(validFilePath); + assertEquals(musicTrack.getNumAudioSources(), 1); + const audioSource = musicTrack.getAudioSource(soundHandleID); + assertEquals(audioSource.getFilePath(), validFilePath); + + C_WebAudio.stopMusic(); + musicTrack.purgeAllVoices(); // The WebAudio API doesn't do this since the track is still fading out + // setTimeout(() => { + assertEquals(0, musicTrack.getNumActiveVoices()); + assertEquals(0, musicTrack.getNumAudioSources()); + // done(); + // }, 2*C_WebAudio.musicFadeoutTimeInMilliseconds); + }); + + const expectedErrorMessage = "Usage: C_WebAudio.playMusic(String filePath)"; + const typeError = new TypeError(expectedErrorMessage); + it("should throw a TypeError if no file path was passed", () => { + const musicTrack = C_WebAudio.getTrackInfo(Enum.AUDIO_CHANNEL_MUSIC); // TODO getMusicTrack, getAmbienceTrack, getEffectsTrack shortcuts + assertEquals(0, musicTrack.getNumAudioSources()); + assertThrows(() => C_WebAudio.playMusic(), typeError); + assertEquals(0, musicTrack.getNumAudioSources()); + }); + + it("should throw a TypeError if the file path is not a String", () => { + // TODO: DRY, move to fixtures + const invalidValues = [ + 42, + [42], + { 42: 42 }, + () => { + let there = "peace"; + }, + C_WebAudio, + ]; + + invalidValues.forEach((invalidValue) => { + const musicTrack = C_WebAudio.getTrackInfo(Enum.AUDIO_CHANNEL_MUSIC); // TODO getMusicTrack, getAmbienceTrack, getEffectsTrack shortcuts + assertEquals(0, musicTrack.getNumAudioSources()); + assertThrows(() => C_WebAudio.playMusic(invalidValue), typeError); + assertEquals(0, musicTrack.getNumAudioSources()); + }); + }); + + it("should stop playing the previous music", function () { + const musicTrack = C_WebAudio.getTrackInfo(Enum.AUDIO_CHANNEL_MUSIC); + assertEquals(0, musicTrack.getNumAudioSources()); + const soundHandleID = 
C_WebAudio.playMusic(testSoundFilePath); + assertEquals(1, musicTrack.getNumAudioSources()); + const audioSource = musicTrack.getAudioSource(soundHandleID); + + // This will actually play the sounds even in CLI mode as it's still running Chromium + // Since that's rather annoying, let's mute the sound temporarily + // const originalVolume = C_WebAudio.getMusicVolume(); + // C_WebAudio.setMusicVolume(0); + + // setTimeout(() => { + // assertTrue(audioSource.isPlaying()); + + // // It doesn't really matter that both audio sources play back the same file, as they're two independent streams + + // assertEquals(musicTrack.getNumAudioSources(), 1); + // const newHandle = C_WebAudio.playMusic(anotherSoundFilePath); + // assertEquals(musicTrack.getNumAudioSources(), 1); + // const newAudioSource = musicTrack.getAudioSource(newHandle); + + // // Technically, playback might not have started yet, but we assume it will shortly + // setTimeout(() => { + // assertFalse(audioSource.isPlaying()); + // assertTrue(newAudioSource.isPlaying()); + + // C_WebAudio.stopMusic(); + // assertEquals(0, musicTrack.getNumAudioSources()); + // C_WebAudio.setMusicVolume(originalVolume); + + // done(); + // }, ANTICIPATED_IO_DELAY); + // }, ANTICIPATED_IO_DELAY); + }); + + // This might be important during scene transitions, if the same track applies to both + it("should do nothing if the same music source is requested while it's still playing", function () { + const musicTrack = C_WebAudio.getTrackInfo(Enum.AUDIO_CHANNEL_MUSIC); + assertEquals(0, musicTrack.getNumAudioSources()); + const handle = C_WebAudio.playMusic(testSoundFilePath); + assertEquals(1, musicTrack.getNumAudioSources()); + + // setTimeout(() => { + // const handle2 = C_WebAudio.playMusic(testSoundFilePath); + // assertEquals(1, musicTrack.getNumAudioSources()); + // const handle3 = C_WebAudio.playMusic(testSoundFilePath); + // assertEquals(1, musicTrack.getNumAudioSources()); + // const handle4 = C_WebAudio.playMusic(testSoundFilePath); + // assertEquals(1, musicTrack.getNumAudioSources()); + + // // It should just return the currently playing music, without creating new audio sources + // assertEquals(handle, handle2); + // assertEquals(handle, handle3); + // assertEquals(handle, handle4); + + // // Make sure it's still playing, though... 
+ // const audioSource = musicTrack.getAudioSource(handle); + // assertTrue(audioSource.isPlaying()); + // assertEquals(1, musicTrack.getNumAudioSources()); + // C_WebAudio.stopMusic(); + // assertEquals(0, musicTrack.getNumAudioSources()); + // done(); + // }, ANTICIPATED_IO_DELAY); + }); +}); diff --git a/Tests/API/C_WebAudio/playSound.js b/Tests/API/C_WebAudio/playSound.js new file mode 100644 index 0000000..3a8486a --- /dev/null +++ b/Tests/API/C_WebAudio/playSound.js @@ -0,0 +1,5 @@ +describe("playSound", () => { + it("should be exported as part of the API surface", () => { + assertEquals(typeof C_WebAudio.playSound, "function"); + }); +}); diff --git a/Tests/API/C_WebAudio/playSoundEffect.js b/Tests/API/C_WebAudio/playSoundEffect.js new file mode 100644 index 0000000..3a8486a --- /dev/null +++ b/Tests/API/C_WebAudio/playSoundEffect.js @@ -0,0 +1,5 @@ +describe("playSound", () => { + it("should be exported as part of the API surface", () => { + assertEquals(typeof C_WebAudio.playSound, "function"); + }); +}); diff --git a/Tests/API/C_WebAudio/setAmbienceVolume.js b/Tests/API/C_WebAudio/setAmbienceVolume.js new file mode 100644 index 0000000..f3f0895 --- /dev/null +++ b/Tests/API/C_WebAudio/setAmbienceVolume.js @@ -0,0 +1,77 @@ +describe("setAmbienceVolume", () => { + beforeEach(() => (C_WebAudio.originalVolumeGain = C_WebAudio.getAmbienceVolume())); + + const path = require("path"); + const someMusicFile = path.join(WEBCLIENT_FIXTURES_DIR, "WebAudio", "dumbo.ogg"); + + it("should be exported as part of the API surface", () => { + assertEquals(typeof C_WebAudio.setAmbienceVolume, "function"); + }); + + it("should be able to set the volume of the Ambience track", () => { + const previousVolumeGain = C_WebAudio.getAmbienceVolume(); + const newVolumeGain = 0.1234567; + + // Just to be safe (it's unlikely to ever trigger) + assertNotApproximatelyEquals(previousVolumeGain, newVolumeGain); + + C_WebAudio.setAmbienceVolume(newVolumeGain); + assertApproximatelyEquals(C_WebAudio.getAmbienceVolume(), newVolumeGain); + }); + + // Internally, BabylonJS appears to convert negative master gain values to positive ones + // Since that seems counter-intuitive and weird, we simply disallow it + it("should throw a RangeError when the volume level is negative", () => { + const previousVolumeGain = C_WebAudio.getAmbienceVolume(); + const newVolumeGain = -0.1234567; + + // Just to be safe (it's unlikely to ever trigger) + assertNotApproximatelyEquals(previousVolumeGain, newVolumeGain); + + const expectedError = new RangeError(C_WebAudio.ERROR_NEGATIVE_VOLUME_GAIN); + assertThrows(() => C_WebAudio.setAmbienceVolume(newVolumeGain), expectedError); + }); + + it("should apply the volume level to existing audio sources on the track", () => { + const audioSource = new AudioSource(someMusicFile); + const musicTrack = C_WebAudio.getTrackInfo(Enum.AUDIO_CHANNEL_AMBIENCE); + const soundHandleID = musicTrack.addAudioSource(audioSource); + + const previousVolumeGain = C_WebAudio.getAmbienceVolume(); + const newVolumeGain = 0.5247; + + assertEquals(audioSource.getVolume(), previousVolumeGain); + C_WebAudio.setAmbienceVolume(newVolumeGain); + assertEquals(audioSource.getVolume(), newVolumeGain); + + musicTrack.removeAudioSource(soundHandleID); + }); + + it("should apply the volume level to newly-created audio sources on the track", () => { + const audioSource = new AudioSource(someMusicFile); + const musicTrack = C_WebAudio.getTrackInfo(Enum.AUDIO_CHANNEL_AMBIENCE); + + const previousVolumeGain = 
C_WebAudio.getAmbienceVolume(); + const newVolumeGain = 0.87483; + + // Just to be safe + assertNotApproximatelyEquals(previousVolumeGain, newVolumeGain); + + C_WebAudio.setAmbienceVolume(newVolumeGain); + + // It should use 1 by default, or at least something different from the arbitrary new volume gain defined here + // So this SHOULD test that the new volume hasn't yet been applied, regardless of what the current one is + assertNotEquals(audioSource.getVolume(), newVolumeGain); + const soundHandleID = musicTrack.addAudioSource(audioSource); + assertEquals(audioSource.getVolume(), newVolumeGain); + + musicTrack.removeAudioSource(soundHandleID); + }); + + // Cleanup: Restore the previous gain level so as to not mess up other tests + afterEach(() => { + C_WebAudio.setAmbienceVolume(C_WebAudio.originalVolumeGain); + }); + + after(() => delete C_WebAudio.originalVolumeGain); +}); diff --git a/Tests/API/C_WebAudio/setEffectsVolume.js b/Tests/API/C_WebAudio/setEffectsVolume.js new file mode 100644 index 0000000..2c0f267 --- /dev/null +++ b/Tests/API/C_WebAudio/setEffectsVolume.js @@ -0,0 +1,77 @@ +describe("setEffectsVolume", () => { + beforeEach(() => (C_WebAudio.originalVolumeGain = C_WebAudio.getEffectsVolume())); + + const path = require("path"); + const someMusicFile = path.join(WEBCLIENT_FIXTURES_DIR, "WebAudio", "dumbo.ogg"); + + it("should be exported as part of the API surface", () => { + assertEquals(typeof C_WebAudio.setEffectsVolume, "function"); + }); + + it("should be able to set the volume of the SFX track", () => { + const previousVolumeGain = C_WebAudio.getEffectsVolume(); + const newVolumeGain = 0.1234567; + + // Just to be safe (it's unlikely to ever trigger) + assertNotApproximatelyEquals(previousVolumeGain, newVolumeGain); + + C_WebAudio.setEffectsVolume(newVolumeGain); + assertApproximatelyEquals(C_WebAudio.getEffectsVolume(), newVolumeGain); + }); + + // Internally, BabylonJS appears to convert negative master gain values to positive ones + // Since that seems counter-intuitive and weird, we simply disallow it + it("should throw a RangeError when the volume level is negative", () => { + const previousVolumeGain = C_WebAudio.getEffectsVolume(); + const newVolumeGain = -0.1234567; + + // Just to be safe (it's unlikely to ever trigger) + assertNotApproximatelyEquals(previousVolumeGain, newVolumeGain); + + const expectedError = new RangeError(C_WebAudio.ERROR_NEGATIVE_VOLUME_GAIN); + assertThrows(() => C_WebAudio.setEffectsVolume(newVolumeGain), expectedError); + }); + + it("should apply the volume level to existing audio sources on the track", () => { + const audioSource = new AudioSource(someMusicFile); + const musicTrack = C_WebAudio.getTrackInfo(Enum.AUDIO_CHANNEL_SFX); + const soundHandleID = musicTrack.addAudioSource(audioSource); + + const previousVolumeGain = C_WebAudio.getEffectsVolume(); + const newVolumeGain = 0.5247; + + assertEquals(audioSource.getVolume(), previousVolumeGain); + C_WebAudio.setEffectsVolume(newVolumeGain); + assertEquals(audioSource.getVolume(), newVolumeGain); + + musicTrack.removeAudioSource(soundHandleID); + }); + + it("should apply the volume level to newly-created audio sources on the track", () => { + const audioSource = new AudioSource(someMusicFile); + const musicTrack = C_WebAudio.getTrackInfo(Enum.AUDIO_CHANNEL_SFX); + + const previousVolumeGain = C_WebAudio.getEffectsVolume(); + const newVolumeGain = 0.87483; + + // Just to be safe + assertNotApproximatelyEquals(previousVolumeGain, newVolumeGain); + + 
C_WebAudio.setEffectsVolume(newVolumeGain); + + // It should use 1 by default, or at least something different from the arbitrary new volume gain defined here + // So this SHOULD test that the new volume hasn't yet been applied, regardless of what the current one is + assertNotEquals(audioSource.getVolume(), newVolumeGain); + const soundHandleID = musicTrack.addAudioSource(audioSource); + assertEquals(audioSource.getVolume(), newVolumeGain); + + musicTrack.removeAudioSource(soundHandleID); + }); + + // Cleanup: Restore the previous gain level so as to not mess up other tests + afterEach(() => { + C_WebAudio.setEffectsVolume(C_WebAudio.originalVolumeGain); + }); + + after(() => delete C_WebAudio.originalVolumeGain); +}); diff --git a/Tests/API/C_WebAudio/setGlobalVolume.js b/Tests/API/C_WebAudio/setGlobalVolume.js new file mode 100644 index 0000000..3c3978c --- /dev/null +++ b/Tests/API/C_WebAudio/setGlobalVolume.js @@ -0,0 +1,38 @@ +describe("setGlobalVolume", () => { + beforeEach(() => (C_WebAudio.originalVolumeGain = C_WebAudio.getGlobalVolume())); + + it("should be exported as part of the API surface", () => { + assertEquals(typeof C_WebAudio.setGlobalVolume, "function"); + }); + + it("should be able to set the global volume of the audio engine", () => { + const previousVolumeGain = C_WebAudio.getGlobalVolume(); + const newVolumeGain = 0.1234567; + + // Just to be safe (it's unlikely to ever trigger) + assertNotApproximatelyEquals(previousVolumeGain, newVolumeGain); + + C_WebAudio.setGlobalVolume(newVolumeGain); + assertApproximatelyEquals(C_WebAudio.getGlobalVolume(), newVolumeGain); + }); + + // Internally, BabylonJS appears to convert negative master gain values to positive ones + // Since that seems counter-intuitive and weird, we simply disallow it + it("should throw a RangeError when the volume level is negative", () => { + const previousVolumeGain = C_WebAudio.getGlobalVolume(); + const newVolumeGain = -0.1234567; + + // Just to be safe (it's unlikely to ever trigger) + assertNotApproximatelyEquals(previousVolumeGain, newVolumeGain); + + const expectedError = new RangeError(C_WebAudio.ERROR_NEGATIVE_VOLUME_GAIN); + assertThrows(() => C_WebAudio.setGlobalVolume(newVolumeGain), expectedError); + }); + + // Cleanup: Restore the previous gain level so as to not mess up other tests + afterEach(() => { + C_WebAudio.setGlobalVolume(C_WebAudio.originalVolumeGain); + }); + + after(() => delete C_WebAudio.originalVolumeGain); +}); diff --git a/Tests/API/C_WebAudio/setMusicVolume.js b/Tests/API/C_WebAudio/setMusicVolume.js new file mode 100644 index 0000000..55806f7 --- /dev/null +++ b/Tests/API/C_WebAudio/setMusicVolume.js @@ -0,0 +1,77 @@ +describe("setMusicVolume", () => { + beforeEach(() => (C_WebAudio.originalVolumeGain = C_WebAudio.getMusicVolume())); + + const path = require("path"); + const someMusicFile = path.join(WEBCLIENT_FIXTURES_DIR, "WebAudio", "dumbo.ogg"); + + it("should be exported as part of the API surface", () => { + assertEquals(typeof C_WebAudio.setMusicVolume, "function"); + }); + + it("should be able to set the volume of the Music track", () => { + const previousVolumeGain = C_WebAudio.getMusicVolume(); + const newVolumeGain = 0.1234567; + + // Just to be safe (it's unlikely to ever trigger) + assertNotApproximatelyEquals(previousVolumeGain, newVolumeGain); + + C_WebAudio.setMusicVolume(newVolumeGain); + assertApproximatelyEquals(C_WebAudio.getMusicVolume(), newVolumeGain); + }); + + // Internally, BabylonJS appears to convert negative master gain values to positive 
ones + // Since that seems counter-intuitive and weird, we simply disallow it + it("should throw a RangeError when the volume level is negative", () => { + const previousVolumeGain = C_WebAudio.getMusicVolume(); + const newVolumeGain = -0.1234567; + + // Just to be safe (it's unlikely to ever trigger) + assertNotApproximatelyEquals(previousVolumeGain, newVolumeGain); + + const expectedError = new RangeError(C_WebAudio.ERROR_NEGATIVE_VOLUME_GAIN); + assertThrows(() => C_WebAudio.setMusicVolume(newVolumeGain), expectedError); + }); + + it("should apply the volume level to existing audio sources on the track", () => { + const audioSource = new AudioSource(someMusicFile); + const musicTrack = C_WebAudio.getTrackInfo(Enum.AUDIO_CHANNEL_MUSIC); + const soundHandleID = musicTrack.addAudioSource(audioSource); + + const previousVolumeGain = C_WebAudio.getMusicVolume(); + const newVolumeGain = 0.5247; + + assertEquals(audioSource.getVolume(), previousVolumeGain); + C_WebAudio.setMusicVolume(newVolumeGain); + assertEquals(audioSource.getVolume(), newVolumeGain); + + musicTrack.removeAudioSource(soundHandleID); + }); + + it("should apply the volume level to newly-created audio sources on the track", () => { + const audioSource = new AudioSource(someMusicFile); + const musicTrack = C_WebAudio.getTrackInfo(Enum.AUDIO_CHANNEL_MUSIC); + + const previousVolumeGain = C_WebAudio.getMusicVolume(); + const newVolumeGain = 0.87483; + + // Just to be safe + assertNotApproximatelyEquals(previousVolumeGain, newVolumeGain); + + C_WebAudio.setMusicVolume(newVolumeGain); + + // It should use 1 by default, or at least something different from the arbitrary new volume gain defined here + // So this SHOULD test that the new volume hasn't yet been applied, regardless of what the current one is + assertNotEquals(audioSource.getVolume(), newVolumeGain); + const soundHandleID = musicTrack.addAudioSource(audioSource); + assertEquals(audioSource.getVolume(), newVolumeGain); + + musicTrack.removeAudioSource(soundHandleID); + }); + + // Cleanup: Restore the previous gain level so as to not mess up other tests + afterEach(() => { + C_WebAudio.setMusicVolume(C_WebAudio.originalVolumeGain); + }); + + after(() => delete C_WebAudio.originalVolumeGain); +}); diff --git a/Tests/API/C_WebAudio/setTrackVolume.js b/Tests/API/C_WebAudio/setTrackVolume.js new file mode 100644 index 0000000..68a9a6f --- /dev/null +++ b/Tests/API/C_WebAudio/setTrackVolume.js @@ -0,0 +1,131 @@ +describe("setTrackVolume", () => { + const customTrackID = "MyCustomTrack"; + const newTrack = C_WebAudio.createTrack(customTrackID); + + beforeEach(() => (C_WebAudio.originalVolumeGain = C_WebAudio.getTrackVolume(customTrackID))); + + const path = require("path"); + const someMusicFile = path.join(WEBCLIENT_FIXTURES_DIR, "WebAudio", "dumbo.ogg"); + + it("should be exported as part of the API surface", () => { + assertEquals(typeof C_WebAudio.setTrackVolume, "function"); + }); + + it("should be able to set the volume of a custom track", () => { + const previousVolumeGain = C_WebAudio.getTrackVolume(customTrackID); + const newVolumeGain = 0.252636; + + // Just to be safe (it's unlikely to ever trigger) + assertNotApproximatelyEquals(previousVolumeGain, newVolumeGain); + + C_WebAudio.setTrackVolume(customTrackID, newVolumeGain); + assertApproximatelyEquals(C_WebAudio.getTrackVolume(customTrackID), newVolumeGain); + }); + + // Internally, BabylonJS appears to convert negative master gain values to positive ones + // Since that seems counter-intuitive and weird, we simply 
disallow it + it("should throw a RangeError when the volume level is negative", () => { + const previousVolumeGain = C_WebAudio.getTrackVolume(customTrackID); + const newVolumeGain = -0.96958; + + // Just to be safe (it's unlikely to ever trigger) + assertNotApproximatelyEquals(previousVolumeGain, newVolumeGain); + + const expectedError = new RangeError(C_WebAudio.ERROR_NEGATIVE_VOLUME_GAIN); + assertThrows(() => C_WebAudio.setTrackVolume(customTrackID, newVolumeGain), expectedError); + }); + + it("should apply the volume level to existing audio sources on the track", () => { + const audioSource = new AudioSource(someMusicFile); + const customTrack = C_WebAudio.getTrackInfo(customTrackID); + const soundHandleID = customTrack.addAudioSource(audioSource); + + const previousVolumeGain = C_WebAudio.getTrackVolume(customTrackID); + const newVolumeGain = 0.47478; + + assertEquals(audioSource.getVolume(), previousVolumeGain); + C_WebAudio.setTrackVolume(customTrackID, newVolumeGain); + assertEquals(audioSource.getVolume(), newVolumeGain); + + customTrack.removeAudioSource(soundHandleID); + }); + + it("should apply the volume level to newly-created audio sources on the track", () => { + const audioSource = new AudioSource(someMusicFile); + const customTrack = C_WebAudio.getTrackInfo(customTrackID); + + const previousVolumeGain = C_WebAudio.getTrackVolume(customTrackID); + const newVolumeGain = 0.21314; + + // Just to be safe + assertNotApproximatelyEquals(previousVolumeGain, newVolumeGain); + + C_WebAudio.setTrackVolume(customTrackID, newVolumeGain); + + // It should use 1 by default, or at least something different from the arbitrary new volume gain defined here + // So this SHOULD test that the new volume hasn't yet been applied, regardless of what the current one is + assertNotEquals(audioSource.getVolume(), newVolumeGain); + const soundHandleID = customTrack.addAudioSource(audioSource); + assertEquals(audioSource.getVolume(), newVolumeGain); + + customTrack.removeAudioSource(soundHandleID); + }); + + const expectedErrorMessage = "Usage: setTrackVolume(String trackID, Number volumeGain)"; + const typeError = new TypeError(expectedErrorMessage); + it("should throw a TypeError if no track ID was passed", () => { + assertThrows(() => C_WebAudio.setTrackVolume(), typeError); + }); + + it("should throw a TypeError if no volume gain was passed", () => { + assertThrows(() => C_WebAudio.setTrackVolume(customTrackID), typeError); + }); + + it("should throw a TypeError if the track ID is not a String", () => { + // TODO: DRY, move to fixtures + const invalidTrackIDs = [ + 42, + [42], + NaN, + { 42: 42 }, + () => { + let there = "peace"; + }, + C_WebAudio, + ]; + + invalidTrackIDs.forEach((invalidTrackID) => { + assertThrows(() => C_WebAudio.setTrackVolume(invalidTrackID, 0.25), typeError); + }); + }); + + it("should throw a TypeError if the volume gain is not a number", () => { + const invalidVolumeGains = [ + "42", + NaN, + [42], + { 42: 42 }, + () => { + let there = "peace"; + }, + C_WebAudio, + ]; + + invalidVolumeGains.forEach((invalidVolumeGain) => { + assertThrows(() => C_WebAudio.setTrackVolume(customTrackID, invalidVolumeGain), typeError); + }); + }); + + it("should throw a TypeError if no audio track with the given track ID exists", () => { + const invalidTrackID = "DoesNotExistProbably"; + const expectedError = new RangeError(C_WebAudio.ERROR_INVALID_TRACK_ID + ": " + invalidTrackID); + assertThrows(() => C_WebAudio.setTrackVolume(invalidTrackID, 0.27), expectedError); + }); + + // Cleanup: 
Restore the previous gain level so as to not mess up other tests + afterEach(() => { + C_WebAudio.setTrackVolume(customTrackID, C_WebAudio.originalVolumeGain); + }); + + after(() => delete C_WebAudio.originalVolumeGain); +}); diff --git a/Tests/API/C_WebAudio/stopMusic.js b/Tests/API/C_WebAudio/stopMusic.js new file mode 100644 index 0000000..8d7dd41 --- /dev/null +++ b/Tests/API/C_WebAudio/stopMusic.js @@ -0,0 +1,15 @@ +// todo mute music before all tests that may produce sound +describe("stopMusic", () => { + it("should be exported as part of the API surface", () => { + assertEquals(typeof C_WebAudio.stopMusic, "function"); + }); + + it("should do nothing if no music is currently playing", () => { + const musicTrack = C_WebAudio.getTrackInfo(Enum.AUDIO_CHANNEL_MUSIC); + assertEquals(musicTrack.getNumActiveVoices(), 0); + assertEquals(musicTrack.getNumAudioSources(), 0); + C_WebAudio.stopMusic(); + assertEquals(musicTrack.getNumActiveVoices(), 0); + assertEquals(musicTrack.getNumAudioSources(), 0); + }); +}); diff --git a/Tests/API/C_WebAudio/stopSound.js b/Tests/API/C_WebAudio/stopSound.js new file mode 100644 index 0000000..3a8486a --- /dev/null +++ b/Tests/API/C_WebAudio/stopSound.js @@ -0,0 +1,5 @@ +describe("playSound", () => { + it("should be exported as part of the API surface", () => { + assertEquals(typeof C_WebAudio.playSound, "function"); + }); +}); diff --git a/Tests/Builtins/AudioTrack.js b/Tests/Builtins/AudioTrack.js new file mode 100644 index 0000000..77f4389 --- /dev/null +++ b/Tests/Builtins/AudioTrack.js @@ -0,0 +1,5 @@ +describe("AudioTrack", () => { + it("should be exported into the global environment", () => { + assertEquals(typeof AudioTrack.constructor, "function"); + }); +}); diff --git a/Tests/Fixtures/WebAudio/dumbo-license.txt b/Tests/Fixtures/WebAudio/dumbo-license.txt new file mode 100644 index 0000000..7bf0a74 --- /dev/null +++ b/Tests/Fixtures/WebAudio/dumbo-license.txt @@ -0,0 +1,3 @@ +Source: https://archive.org/details/badpanda082 +License: https://creativecommons.org/licenses/by-nc-sa/3.0/ +Modifications: Selected the first 30 seconds, reduced quality (minimum setting in Audacity) \ No newline at end of file diff --git a/Tests/Fixtures/WebAudio/dumbo.ogg b/Tests/Fixtures/WebAudio/dumbo.ogg new file mode 100644 index 0000000..34cfac5 Binary files /dev/null and b/Tests/Fixtures/WebAudio/dumbo.ogg differ diff --git a/Tests/Fixtures/WebAudio/dumbo2.ogg b/Tests/Fixtures/WebAudio/dumbo2.ogg new file mode 100644 index 0000000..34cfac5 Binary files /dev/null and b/Tests/Fixtures/WebAudio/dumbo2.ogg differ diff --git a/Tests/SharedConstants/AudioChannels.js b/Tests/SharedConstants/AudioChannels.js new file mode 100644 index 0000000..3bb99cc --- /dev/null +++ b/Tests/SharedConstants/AudioChannels.js @@ -0,0 +1,23 @@ +const enumCategories = { + "Audio Channels": { + AUDIO_CHANNEL_SFX: "SFX", + AUDIO_CHANNEL_MUSIC: "Music", + AUDIO_CHANNEL_AMBIENCE: "Ambience", + }, +}; + +describe("Shared enumeration constants", () => { + for (const namespace in enumCategories) { + const testCases = enumCategories[namespace]; + + describe(namespace, () => { + for (const enumKey in testCases) { + const expectedValue = testCases[enumKey]; + + it("should export Enum key " + enumKey + " as " + expectedValue, () => { + assertEquals(Enum[enumKey], expectedValue); + }); + } + }); + } +}); diff --git a/Tests/run-renderer-tests.js b/Tests/run-renderer-tests.js index f3fe434..33502f4 100644 --- a/Tests/run-renderer-tests.js +++ b/Tests/run-renderer-tests.js @@ -1,8 +1,9 @@ const testSuites 
= { - SharedConstants: ["SharedConstants/Aliases.js", "SharedConstants/Paths.js"], + SharedConstants: ["SharedConstants/Aliases.js", "SharedConstants/AudioChannels.js", "SharedConstants/Paths.js"], Builtins: [ "Builtins/Assertions.js", "Builtins/LocalCacheTests.js", + "Builtins/AudioTrack.js", "Builtins/Decoder.js", "Builtins/UniqueID.js", "Builtins/Validators.js", @@ -14,7 +15,34 @@ const testSuites = { "API/C_Settings/validateDefaultSettings.js", "API/C_Settings/validateUserSettings.js", ], - C_WebAudio: ["API/C_WebAudio/BuiltinAudioDecoder.js"], + C_WebAudio: [ + "API/C_WebAudio/AudioSource.js", + "API/C_WebAudio/BuiltinAudioDecoder.js", + "API/C_WebAudio/createTrack.js", + "API/C_WebAudio/getTrackInfo.js", + "API/C_WebAudio/playMusic.js", + "API/C_WebAudio/stopMusic.js", + "API/C_WebAudio/playSound.js", + "API/C_WebAudio/stopSound.js", + "API/C_WebAudio/playSoundEffect.js", + "API/C_WebAudio/playAmbientSound.js", + "API/C_WebAudio/setGlobalVolume.js", + "API/C_WebAudio/getGlobalVolume.js", + "API/C_WebAudio/getTrackVolume.js", + "API/C_WebAudio/setTrackVolume.js", + "API/C_WebAudio/getMusicVolume.js", + "API/C_WebAudio/setMusicVolume.js", + "API/C_WebAudio/getEffectsVolume.js", + "API/C_WebAudio/setEffectsVolume.js", + "API/C_WebAudio/getAmbienceVolume.js", + "API/C_WebAudio/setAmbienceVolume.js", + "API/C_WebAudio/getSupportedFileFormats.js", + "API/C_WebAudio/canPlayMP3.js", + "API/C_WebAudio/canPlayOGG.js", + "API/C_WebAudio/canPlayWAV.js", + "API/C_WebAudio/isAudioAvailable.js", + "API/C_WebAudio/isAudioContextInitialized.js", + ], }; for (const namespace in testSuites) { diff --git a/index.html b/index.html index 3085be2..ee65b1d 100644 --- a/index.html +++ b/index.html @@ -32,6 +32,7 @@ // Shorthand because I'm lazy (must be set after the localization tables have been read) let L = {}; // Localization table (populated on load for the current client locale only) + @@ -143,6 +144,9 @@ + + +
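
Reviewer note: the suites added above collectively pin down the volume-handling and playback contract of the C_WebAudio controller. The sketch below is illustrative only and not part of the diff; it strings together the calls the tests exercise, assumes the same global test environment (C_WebAudio, Enum, AudioSource, WEBCLIENT_FIXTURES_DIR), and claims nothing beyond what the assertions above already state.

// Illustrative usage sketch, derived from the behaviour asserted in the test suites above
const path = require("path");
const someSoundFilePath = path.join(WEBCLIENT_FIXTURES_DIR, "WebAudio", "dumbo.ogg");

// Named channels resolve to AudioTrack instances; custom tracks can be created on demand
const sfxTrack = C_WebAudio.getTrackInfo(Enum.AUDIO_CHANNEL_SFX);
C_WebAudio.createTrack("MyCustomTrack");

// Volume setters validate their input: missing or non-string track IDs and non-numeric
// gains throw a TypeError, negative gains throw a RangeError (ERROR_NEGATIVE_VOLUME_GAIN),
// and unknown track IDs throw a RangeError (ERROR_INVALID_TRACK_ID)
C_WebAudio.setTrackVolume("MyCustomTrack", 0.5);
C_WebAudio.setEffectsVolume(0.25); // shortcut for the built-in SFX channel

// The track gain is applied to existing audio sources as well as to sources added later
const audioSource = new AudioSource(someSoundFilePath);
const soundHandleID = sfxTrack.addAudioSource(audioSource);
// audioSource.getVolume() now reports 0.25, per the setEffectsVolume suite
sfxTrack.removeAudioSource(soundHandleID);

// playMusic returns a handle that resolves back to the AudioSource on the Music track;
// stopMusic fades the track out, and purgeAllVoices() then drops the faded sources
const musicHandleID = C_WebAudio.playMusic(someSoundFilePath);
const musicTrack = C_WebAudio.getTrackInfo(Enum.AUDIO_CHANNEL_MUSIC);
const currentMusic = musicTrack.getAudioSource(musicHandleID);
// currentMusic.getFilePath() === someSoundFilePath, per the playMusic suite
C_WebAudio.stopMusic();
musicTrack.purgeAllVoices();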