// JSAudioContextCustom.cpp
#include "config.h"
#if ENABLE(WEB_AUDIO)
#include "AudioContext.h"
#include "ArrayBuffer.h"
#include "AudioBuffer.h"
#include "JSArrayBuffer.h"
#include "JSAudioBuffer.h"
#include "JSAudioContext.h"
#include <runtime/Error.h>
using namespace JSC;
namespace WebCore {
// GC marking hook: in addition to the slots visited by the generated base
// class, this wrapper must keep its JS event listeners alive for as long as
// the wrapper itself is reachable.
void JSAudioContext::visitChildren(SlotVisitor& visitor)
{
ASSERT_GC_OBJECT_INHERITS(this, &s_info);
// The class must declare OverridesVisitChildren in StructureFlags, or the GC
// would skip this override entirely; enforce that at compile time and again
// at runtime against the actual Structure.
COMPILE_ASSERT(StructureFlags & OverridesVisitChildren, OverridesVisitChildrenWithoutSettingFlag);
ASSERT(structure()->typeInfo().overridesVisitChildren());
Base::visitChildren(visitor);
// Mark listener functions registered on the underlying AudioContext
// (an EventTarget) so they are not collected while the context is alive.
m_impl->visitJSEventListeners(visitor);
}
// Custom constructor binding for AudioContext / webkitAudioContext.
// Zero arguments: create a realtime AudioContext for the owning document.
// Three arguments (numberOfChannels, numberOfFrames, sampleRate): create an
// offline rendering context. Throws a ReferenceError when the execution
// context is unavailable or is not a Document, a SyntaxError on too few
// arguments, and a ReferenceError if context creation fails.
EncodedJSValue JSC_HOST_CALL JSAudioContextConstructor::constructJSAudioContext(ExecState* exec)
{
    JSAudioContextConstructor* jsConstructor = static_cast<JSAudioContextConstructor*>(exec->callee());
    if (!jsConstructor)
        return throwError(exec, createReferenceError(exec, "AudioContext constructor callee is unavailable"));

    ScriptExecutionContext* scriptExecutionContext = jsConstructor->scriptExecutionContext();
    if (!scriptExecutionContext)
        return throwError(exec, createReferenceError(exec, "AudioContext constructor script execution context is unavailable"));

    // AudioContext creation is tied to a page's Document, so reject workers
    // and any other non-document execution contexts before downcasting.
    if (!scriptExecutionContext->isDocument())
        return throwError(exec, createReferenceError(exec, "AudioContext constructor called in a script execution context which is not a document"));

    Document* document = static_cast<Document*>(scriptExecutionContext);

    RefPtr<AudioContext> audioContext;

    if (!exec->argumentCount()) {
        // Realtime (online) context.
        audioContext = AudioContext::create(document);
    } else {
        // Offline variant: requires all three numeric arguments.
        if (exec->argumentCount() < 3)
            return throwError(exec, createSyntaxError(exec, "Not enough arguments"));

        // Use toUInt32 (ECMAScript ToUint32) for the unsigned parameters
        // instead of toInt32 plus an implicit signed->unsigned conversion;
        // the resulting value is identical but the intent is explicit.
        unsigned numberOfChannels = exec->argument(0).toUInt32(exec);
        unsigned numberOfFrames = exec->argument(1).toUInt32(exec);
        float sampleRate = exec->argument(2).toFloat(exec);

        audioContext = AudioContext::createOfflineContext(document, numberOfChannels, numberOfFrames, sampleRate);
    }

    // Creation can fail (e.g. invalid offline parameters); surface that as an
    // exception rather than returning a wrapper around null.
    if (!audioContext.get())
        return throwError(exec, createReferenceError(exec, "Error creating AudioContext"));

    return JSValue::encode(asObject(toJS(exec, jsConstructor->globalObject(), audioContext.get())));
}
// Custom binding for AudioContext.createBuffer(), which has two overloads:
//   createBuffer(ArrayBuffer audioData, bool mixToMono) -- decode encoded
//     audio file data into an AudioBuffer;
//   createBuffer(numberOfChannels, numberOfFrames, sampleRate) -- allocate
//     an empty buffer with the given geometry.
// Throws a SyntaxError on too few arguments or on creation/decode failure.
JSValue JSAudioContext::createBuffer(ExecState* exec)
{
    if (exec->argumentCount() < 2)
        return throwError(exec, createSyntaxError(exec, "Not enough arguments"));

    AudioContext* audioContext = static_cast<AudioContext*>(impl());
    ASSERT(audioContext);

    JSValue val = exec->argument(0);

    // Overload 1: first argument is an ArrayBuffer of encoded audio data.
    if (val.inherits(&JSArrayBuffer::s_info)) {
        ArrayBuffer* arrayBuffer = toArrayBuffer(val);
        ASSERT(arrayBuffer);
        if (arrayBuffer) {
            bool mixToMono = exec->argument(1).toBoolean(exec);

            RefPtr<AudioBuffer> audioBuffer = audioContext->createBuffer(arrayBuffer, mixToMono);
            if (!audioBuffer.get())
                return throwError(exec, createSyntaxError(exec, "Error decoding audio file data"));

            return toJS(exec, globalObject(), audioBuffer.get());
        }
        // Unreachable in debug builds (ASSERT above); in release builds a
        // null ArrayBuffer degrades to undefined rather than crashing.
        return jsUndefined();
    }

    // Overload 2: explicit channel/frame/sample-rate geometry.
    if (exec->argumentCount() < 3)
        return throwError(exec, createSyntaxError(exec, "Not enough arguments"));

    // toUInt32 matches the unsigned parameter types directly; toInt32 would
    // rely on an implicit signed->unsigned conversion for the same bits.
    unsigned numberOfChannels = exec->argument(0).toUInt32(exec);
    unsigned numberOfFrames = exec->argument(1).toUInt32(exec);
    float sampleRate = exec->argument(2).toFloat(exec);

    RefPtr<AudioBuffer> audioBuffer = audioContext->createBuffer(numberOfChannels, numberOfFrames, sampleRate);
    if (!audioBuffer.get())
        return throwError(exec, createSyntaxError(exec, "Error creating AudioBuffer"));

    return toJS(exec, globalObject(), audioBuffer.get());
}
}
#endif // ENABLE(WEB_AUDIO)