#include "config.h"

#if ENABLE(WEB_AUDIO)

#include "AudioParam.h"

#include "AudioNode.h"
#include "AudioNodeOutput.h"
#include "AudioUtilities.h"
#include "FloatConversion.h"
#include "Logging.h"
#include <cmath>
#include <wtf/MathExtras.h>
namespace WebCore {
// Per-step coefficient of the exponential approach performed by smooth();
// larger values converge on the target faster.
const double AudioParam::DefaultSmoothingConstant = 0.05;
// Once the smoothed value is within this distance of the target, smooth()
// snaps it exactly onto the target so the approach terminates.
const double AudioParam::SnapThreshold = 0.001;
// Constructs a parameter with its static metadata (name, default/min/max,
// automation configuration, units tag). Both the base value and the smoothed
// value start at the default, so the first smooth() call has a well-defined
// starting point.
// NOTE(review): |units| is an opaque numeric tag here; its meaning is defined
// by the callers/header -- confirm against AudioParam.h.
AudioParam::AudioParam(BaseAudioContext& context, const String& name, double defaultValue, double minValue, double maxValue, AutomationRate automationRate, AutomationRateMode automationRateMode, unsigned units)
: AudioSummingJunction(context)
, m_name(name)
, m_value(defaultValue)
, m_defaultValue(defaultValue)
, m_minValue(minValue)
, m_maxValue(maxValue)
, m_automationRate(automationRate)
, m_automationRateMode(automationRateMode)
, m_units(units)
, m_smoothedValue(defaultValue)
, m_smoothingConstant(DefaultSmoothingConstant)
#if !RELEASE_LOG_DISABLED
// Logger state is only compiled in when release logging is enabled.
, m_logger(context.logger())
, m_logIdentifier(context.nextAudioParameterLogIdentifier())
#endif
{
ALWAYS_LOG(LOGIDENTIFIER, "name = ", m_name, ", value = ", m_value, ", default = ", m_defaultValue, ", min = ", m_minValue, ", max = ", m_maxValue, ", units = ", m_units);
}
// Returns the parameter's current value. On the audio thread the automation
// timeline is consulted first; a timeline-provided value takes precedence
// over (and replaces) the stored base value.
float AudioParam::value()
{
    if (context().isAudioThread()) {
        bool hasTimelineValue = false;
        float valueFromTimeline = m_timeline.valueForContextTime(context(), narrowPrecisionToFloat(m_value), hasTimelineValue);
        if (hasTimelineValue)
            m_value = valueFromTimeline;
    }

    return narrowPrecisionToFloat(m_value);
}
// Sets the parameter's base value directly (not via the timeline).
// Non-finite values (NaN, +/-infinity) are silently ignored so that a bad
// caller value cannot poison subsequent rendering.
void AudioParam::setValue(float value)
{
    DEBUG_LOG(LOGIDENTIFIER, value);

    // std::isfinite() covers both the NaN and infinity checks the previous
    // code spelled out separately.
    if (std::isfinite(value))
        m_value = value;
}
// Switches the parameter's automation rate. Some parameters fix their rate
// at construction (AutomationRateMode::Fixed); for those, the request is
// rejected with InvalidStateError.
ExceptionOr<void> AudioParam::setAutomationRate(AutomationRate automationRate)
{
    if (m_automationRateMode == AutomationRateMode::Fixed) {
        // Use the _s literal suffix like every other Exception message in
        // this file (the bare literal here was inconsistent).
        return Exception { InvalidStateError, "automationRate cannot be changed for this node"_s };
    }

    m_automationRate = automationRate;
    return { };
}
// Returns the de-zippered value produced by the most recent smooth() call.
float AudioParam::smoothedValue()
{
    const double current = m_smoothedValue;
    return narrowPrecisionToFloat(current);
}
// Moves m_smoothedValue one step toward m_value (first-order "de-zippering")
// and returns true once the two are already equal. Values driven by the
// timeline are adopted verbatim -- the timeline effectively performs its own
// smoothing.
bool AudioParam::smooth()
{
    bool hasTimelineValue = false;
    m_value = m_timeline.valueForContextTime(context(), narrowPrecisionToFloat(m_value), hasTimelineValue);

    if (m_smoothedValue == m_value)
        return true;

    if (hasTimelineValue) {
        // Scheduled value: take it exactly, no extra filtering.
        m_smoothedValue = m_value;
        return false;
    }

    // One step of exponential approach toward the target value.
    m_smoothedValue += (m_value - m_smoothedValue) * m_smoothingConstant;

    // Snap to the target once close enough, so the approach terminates.
    if (fabs(m_smoothedValue - m_value) < SnapThreshold)
        m_smoothedValue = m_value;

    return false;
}
// Schedules an immediate jump to |value| at |startTime| seconds on the
// automation timeline. Negative start times are rejected.
ExceptionOr<AudioParam&> AudioParam::setValueAtTime(float value, double startTime)
{
    if (startTime < 0)
        return Exception { RangeError, "startTime must be a positive value"_s };

    if (auto result = m_timeline.setValueAtTime(value, Seconds { startTime }); result.hasException())
        return result.releaseException();
    return *this;
}
// Schedules a linear ramp toward |value|, ending at |endTime| seconds.
// Negative end times are rejected.
ExceptionOr<AudioParam&> AudioParam::linearRampToValueAtTime(float value, double endTime)
{
    if (endTime < 0)
        return Exception { RangeError, "endTime must be a positive value"_s };

    if (auto result = m_timeline.linearRampToValueAtTime(value, Seconds { endTime }); result.hasException())
        return result.releaseException();
    return *this;
}
// Schedules an exponential ramp toward |value|, ending at |endTime| seconds.
// A zero target is rejected (an exponential curve can never reach zero),
// as are negative end times. Check order is significant for callers that
// violate both constraints.
ExceptionOr<AudioParam&> AudioParam::exponentialRampToValueAtTime(float value, double endTime)
{
    if (!value)
        return Exception { RangeError, "value cannot be 0"_s };

    if (endTime < 0)
        return Exception { RangeError, "endTime must be a positive value"_s };

    if (auto result = m_timeline.exponentialRampToValueAtTime(value, Seconds { endTime }); result.hasException())
        return result.releaseException();
    return *this;
}
// Schedules an exponential approach toward |target| starting at |startTime|
// seconds, with the given time constant. Negative times/constants are
// rejected; check order is significant.
ExceptionOr<AudioParam&> AudioParam::setTargetAtTime(float target, double startTime, float timeConstant)
{
    if (startTime < 0)
        return Exception { RangeError, "startTime must be a positive value"_s };

    if (timeConstant < 0)
        return Exception { RangeError, "timeConstant must be a positive value"_s };

    if (auto result = m_timeline.setTargetAtTime(target, Seconds { startTime }, timeConstant); result.hasException())
        return result.releaseException();
    return *this;
}
// Schedules an arbitrary curve of values starting at |startTime| and lasting
// |duration| seconds. The curve must contain at least two points, and the
// time window must be valid; check order is significant.
ExceptionOr<AudioParam&> AudioParam::setValueCurveAtTime(Vector<float>&& curve, double startTime, double duration)
{
    if (curve.size() < 2)
        return Exception { InvalidStateError, "Array must have a length of at least 2"_s };

    if (startTime < 0)
        return Exception { RangeError, "startTime must be a positive value"_s };

    if (duration <= 0)
        return Exception { RangeError, "duration must be a strictly positive value"_s };

    if (auto result = m_timeline.setValueCurveAtTime(WTFMove(curve), Seconds { startTime }, Seconds { duration }); result.hasException())
        return result.releaseException();
    return *this;
}
// Forwards to the timeline to cancel scheduled automation events relative to
// |cancelTime| seconds. Negative cancel times are rejected.
ExceptionOr<AudioParam&> AudioParam::cancelScheduledValues(double cancelTime)
{
    if (cancelTime < 0)
        return Exception { RangeError, "cancelTime must be a positive value"_s };

    m_timeline.cancelScheduledValues(Seconds { cancelTime });
    return *this;
}
float AudioParam::finalValue()
{
float value;
calculateFinalValues(&value, 1, false);
return value;
}
// Fills |values| with |numberOfValues| parameter values for the render
// quantum. a-rate parameters are computed per sample frame; k-rate
// parameters are not (a single intrinsic value is used).
void AudioParam::calculateSampleAccurateValues(float* values, unsigned numberOfValues)
{
    bool canProcess = context().isAudioThread() && values && numberOfValues;
    ASSERT(canProcess);
    if (!canProcess)
        return;

    bool sampleAccurate = automationRate() == AutomationRate::ARate;
    calculateFinalValues(values, numberOfValues, sampleAccurate);
}
// Computes the final parameter values for the render quantum: first the
// intrinsic value (timeline automation or the stored base value), then the
// summed audio output of every AudioNodeOutput connected to this parameter.
void AudioParam::calculateFinalValues(float* values, unsigned numberOfValues, bool sampleAccurate)
{
// Must run on the audio thread with a non-empty destination buffer.
bool isGood = context().isAudioThread() && values && numberOfValues;
ASSERT(isGood);
if (!isGood)
return;
// Compute the intrinsic values first.
if (sampleAccurate) {
// Sample-accurate (a-rate): one timeline value per sample frame.
calculateTimelineValues(values, numberOfValues);
} else {
// k-rate: a single value for the quantum, written to values[0] only.
bool hasValue;
float timelineValue = m_timeline.valueForContextTime(context(), narrowPrecisionToFloat(m_value), hasValue);
if (hasValue)
m_value = timelineValue;
values[0] = narrowPrecisionToFloat(m_value);
}
// Wrap |values| in a mono bus in place (no channel allocation) and sum in
// the audio from each connected output.
auto summingBus = AudioBus::create(1, numberOfValues, false);
summingBus->setChannelMemory(0, values, numberOfValues);
for (auto& output : m_renderingOutputs) {
ASSERT(output);
// Pull the connected output's audio for this quantum and mix it in.
AudioBus* connectionBus = output->pull(0, AudioNode::ProcessingSizeInFrames);
summingBus->sumFrom(*connectionBus);
}
}
void AudioParam::calculateTimelineValues(float* values, unsigned numberOfValues)
{
double sampleRate = context().sampleRate();
Seconds startTime = Seconds { context().currentTime() };
Seconds endTime = startTime + Seconds { numberOfValues / sampleRate };
m_value = m_timeline.valuesForTimeRange(startTime, endTime, narrowPrecisionToFloat(m_value), values, numberOfValues, sampleRate, sampleRate);
}
// Attaches |output| as an audio-rate modulation source for this parameter.
// Null outputs and duplicate connections are no-ops. Must hold the graph lock.
void AudioParam::connect(AudioNodeOutput* output)
{
    ASSERT(context().isGraphOwner());

    ASSERT(output);
    if (!output)
        return;

    bool isNewConnection = m_outputs.add(output).isNewEntry;
    if (!isNewConnection)
        return;

    INFO_LOG(LOGIDENTIFIER, output->node()->nodeType());

    output->addParam(this);
    changedOutputs();
}
// Detaches |output| from this parameter. Null outputs and outputs that were
// never connected are no-ops. Must hold the graph lock.
void AudioParam::disconnect(AudioNodeOutput* output)
{
    ASSERT(context().isGraphOwner());

    ASSERT(output);
    if (!output)
        return;

    INFO_LOG(LOGIDENTIFIER, output->node()->nodeType());

    bool wasConnected = m_outputs.remove(output);
    if (wasConnected) {
        changedOutputs();
        output->removeParam(this);
    }
}
#if !RELEASE_LOG_DISABLED
// Routes this object's logging macros (ALWAYS_LOG/INFO_LOG/DEBUG_LOG) to the
// Media log channel.
WTFLogChannel& AudioParam::logChannel() const
{
return LogMedia;
}
#endif
}
#endif // ENABLE(WEB_AUDIO)