// SpeechRecognition.cpp
#include "config.h"
#if ENABLE(SCRIPTED_SPEECH)
#include "SpeechRecognition.h"
#include "Document.h"
#include "Page.h"
#include "SpeechRecognitionController.h"
#include "SpeechRecognitionError.h"
#include "SpeechRecognitionEvent.h"
namespace WebCore {
PassRefPtr<SpeechRecognition> SpeechRecognition::create(ScriptExecutionContext* context)
{
    // Construct the wrapper, then let ActiveDOMObject hook into the context's
    // suspend/resume machinery before the reference is handed to the caller.
    RefPtr<SpeechRecognition> recognition = adoptRef(new SpeechRecognition(context));
    recognition->suspendIfNeeded();
    return recognition.release();
}
void SpeechRecognition::start()
{
    // The controller is resolved from the page in the constructor and
    // asserted non-null there, so it must still be present here.
    ASSERT(m_controller);
    m_controller->start(this, m_grammars.get(), m_lang, m_continuous);
}
void SpeechRecognition::stopFunction()
{
    // Ask the platform recognizer to stop capturing audio for this session.
    ASSERT(m_controller);
    m_controller->stop(this);
}
void SpeechRecognition::abort()
{
    // Ask the platform recognizer to abandon this session immediately.
    ASSERT(m_controller);
    m_controller->abort(this);
}
void SpeechRecognition::didStartAudio()
{
dispatchEvent(Event::create(eventNames().audiostartEvent, false, false));
}
void SpeechRecognition::didStartSound()
{
dispatchEvent(Event::create(eventNames().soundstartEvent, false, false));
}
void SpeechRecognition::didStartSpeech()
{
dispatchEvent(Event::create(eventNames().speechstartEvent, false, false));
}
void SpeechRecognition::didEndSpeech()
{
dispatchEvent(Event::create(eventNames().speechendEvent, false, false));
}
void SpeechRecognition::didEndSound()
{
dispatchEvent(Event::create(eventNames().soundendEvent, false, false));
}
void SpeechRecognition::didEndAudio()
{
dispatchEvent(Event::create(eventNames().audioendEvent, false, false));
}
void SpeechRecognition::didReceiveResult(PassRefPtr<SpeechRecognitionResult> result, unsigned long resultIndex, PassRefPtr<SpeechRecognitionResultList> resultHistory)
{
    // Recognizer callback: wrap the new result, its index, and the running
    // history into a 'result' event and deliver it to script.
    RefPtr<SpeechRecognitionEvent> resultEvent = SpeechRecognitionEvent::createResult(result, resultIndex, resultHistory);
    dispatchEvent(resultEvent.release());
}
void SpeechRecognition::didReceiveNoMatch(PassRefPtr<SpeechRecognitionResult> result)
{
    // Recognizer callback: deliver a 'nomatch' event carrying the (possibly
    // low-confidence) result the recognizer produced.
    RefPtr<SpeechRecognitionEvent> noMatchEvent = SpeechRecognitionEvent::createNoMatch(result);
    dispatchEvent(noMatchEvent.release());
}
void SpeechRecognition::didDeleteResult(unsigned resultIndex, PassRefPtr<SpeechRecognitionResultList> resultHistory)
{
dispatchEvent(SpeechRecognitionEvent::createResultDeleted(resultIndex, resultHistory));
}
void SpeechRecognition::didReceiveError(PassRefPtr<SpeechRecognitionError> error)
{
    // Recognizer callback: deliver the platform error to script as an event.
    RefPtr<SpeechRecognitionEvent> errorEvent = SpeechRecognitionEvent::createError(error);
    dispatchEvent(errorEvent.release());
}
void SpeechRecognition::didStart()
{
dispatchEvent(Event::create(eventNames().startEvent, false, false));
}
void SpeechRecognition::didEnd()
{
dispatchEvent(Event::create(eventNames().endEvent, false, false));
}
// EventTarget override: identifies this object's interface for event-target
// bookkeeping; returns the shared interned name for SpeechRecognition.
const AtomicString& SpeechRecognition::interfaceName() const
{
return eventNames().interfaceForSpeechRecognition;
}
// EventTarget override: forwards to ActiveDOMObject, which holds the context
// this object was created with (disambiguates the two base-class copies).
ScriptExecutionContext* SpeechRecognition::scriptExecutionContext() const
{
return ActiveDOMObject::scriptExecutionContext();
}
SpeechRecognition::SpeechRecognition(ScriptExecutionContext* context)
    : ActiveDOMObject(context, this)
    , m_grammars(SpeechGrammarList::create())
    , m_continuous(false)
    , m_controller(0)
{
    // The controller is supplied at page level, so walk
    // context -> document -> page to find it. A document context is a
    // precondition of construction (asserted rather than checked).
    ASSERT(scriptExecutionContext()->isDocument());
    Document* hostDocument = static_cast<Document*>(scriptExecutionContext());
    Page* hostPage = hostDocument->page();
    ASSERT(hostPage);
    m_controller = SpeechRecognitionController::from(hostPage);
    ASSERT(m_controller);
}
// No manual teardown: m_grammars is a RefPtr and m_controller is a
// page-supplement pointer this object does not own.
SpeechRecognition::~SpeechRecognition()
{
}
}
#endif // ENABLE(SCRIPTED_SPEECH)