working on Quest now too via Vosk

speech
Fabien Benetou 6 months ago
parent 0af99c5c40
commit 89ed593d20
  1. 88
      index.html
  2. 39
      recognizer-processor.js

@ -9,6 +9,7 @@
<script src='dependencies/webdav.js'></script>
<script src='jxr-core.js?1234'></script>
<script src='jxr-postitnote.js?13235'></script>
<script type="application/javascript" src="https://cdn.jsdelivr.net/npm/vosk-browser@0.0.8/dist/vosk.js"></script>
</head>
<body>
@ -55,23 +56,88 @@ function getAllPrimitives(){
.map( (i,j) => i )
} // adapted from https://git.benetou.fr/utopiah/text-code-xr-engine/src/commit/0e1f297ec0cd17b0356811dfa0ab55f1e2629e7c/index.html#L2101
// Feature-detect the native Web Speech API: Chrome exposes it as
// webkitSpeechRecognition, while browsers like the Quest's lack it entirely.
const SpeechRecognition = window.webkitSpeechRecognition;
// Dispatch on availability: native recognizer when present, otherwise the
// Vosk (WASM) fallback so speech also works on the Quest.
// NOTE: previously a recognizer was instantiated eagerly here BEFORE the
// availability check — `new SpeechRecognition()` throws a TypeError when the
// API is missing, defeating the fallback. Construction now happens only
// inside nativeSpeechRecognition().
// (The aframeprimitives/speechactions/speechcustomcomponents constants are
// declared once, further below — they were duplicated here, which is a
// SyntaxError for `const` redeclarations in the same scope.)
if (SpeechRecognition) {
  nativeSpeechRecognition( parseSpeech )
} else {
  // async fallback: surface failures (model download, mic permission) in the console
  startVoiceRecognition( parseSpeech ).catch(console.error)
}
// Sets up the browser's native Web Speech API recognizer and forwards each
// FINAL transcript to callbackOnComplete(speechContent).
// NOTE: `recognizer` is deliberately assigned without a declaration so it
// stays global — a top-level `recognizer.start()` elsewhere in this file
// relies on reaching it.
function nativeSpeechRecognition(callbackOnComplete){
  recognizer = new SpeechRecognition();
  recognizer.interimResults = true;  // deliver partial hypotheses while speaking
  recognizer.continuous = true;      // keep listening after each utterance
  // does not work recognizer.lang = 'fr-FR';
  recognizer.lang = 'en-US';
  // Bug fix: the handler was previously assigned twice in a nested fashion
  // (`recognizer.onresult = e => { recognizer.onresult = e => {...} }`), so
  // the very first speech result only re-installed the handler and its
  // transcript was silently dropped. A single handler fixes that.
  recognizer.onresult = (event) => {
    let result = event.results[event.resultIndex]
    if (result.isFinal) {
      console.log('You said: ' + result[0].transcript )
      let speechContent = result[0].transcript
      callbackOnComplete( speechContent )
    }
  }
}
// recognizer.start();
// Fallback speech recognition via Vosk (Kaldi compiled to WASM), used on
// browsers without the Web Speech API (e.g. the Quest). Streams microphone
// audio through an AudioWorklet to the recognizer and forwards each final
// transcript to callbackOnComplete(text).
async function startVoiceRecognition( callbackOnComplete ) {
  /* requires
   recognizer-processor.js
   https://fabien.benetou.fr/pub/home/future_of_text_demo/engine/vosk-browser/vosk-model-small-en-us-0.15.tar.gz
   from https://github.com/ccoreilly/vosk-browser/tree/master/examples/modern-vanilla
  */
  // Dedicated channel: port1 goes to the model worker, port2 to the worklet.
  const channel = new MessageChannel();
  // const model = await Vosk.createModel('model.tar.gz');
  const model = await Vosk.createModel('https://fabien.benetou.fr/pub/home/future_of_text_demo/engine/vosk-browser/vosk-model-small-en-us-0.15.tar.gz');
  model.registerPort(channel.port1);
  const sampleRate = 48000;
  const recognizer = new model.KaldiRecognizer(sampleRate);
  recognizer.setWords(true);
  recognizer.on("result", (message) => {
    const result = message.result;
    // Bug fix: the callback was previously invoked OUTSIDE this guard, so
    // `result.text` was read unconditionally and threw when no result was
    // present in the message.
    if (result) {
      console.log(JSON.stringify(result, null, 2));
      callbackOnComplete( result.text )
    }
  });
  recognizer.on("partialresult", (message) => {
    const partial = message.result.partial;
    if (partial) console.log(partial)
  });
  // Mono microphone capture at the recognizer's sample rate.
  const mediaStream = await navigator.mediaDevices.getUserMedia({
    video: false,
    audio: {
      echoCancellation: true,
      noiseSuppression: true,
      channelCount: 1,
      sampleRate
    },
  });
  const audioContext = new AudioContext();
  await audioContext.audioWorklet.addModule('recognizer-processor.js')
  const recognizerProcessor = new AudioWorkletNode(audioContext, 'recognizer-processor', { channelCount: 1, numberOfInputs: 1, numberOfOutputs: 1 });
  // Hand the worklet its end of the channel plus the recognizer id.
  recognizerProcessor.port.postMessage({action: 'init', recognizerId: recognizer.id}, [ channel.port2 ])
  recognizerProcessor.connect(audioContext.destination);
  const source = audioContext.createMediaStreamSource(mediaStream);
  source.connect(recognizerProcessor);
}
// Vocabulary used by the voice-command parser below.
const aframeprimitives = getAllPrimitives()  // presumably registered A-Frame primitive names — confirm against getAllPrimitives()
const speechactions = [ "add", "apply", "delete", "clone", "model", "undo" ]  // command verbs recognized from speech
const speechcustomcomponents = [ "target", "teleporter" ]  // custom components that can be applied by voice
function parseSpeech( speechContent ) {
let latest = addedContent.at(-1)
let cmd_words = speechContent.split(" ").map( i => i.toLowerCase() )
let el
@ -145,9 +211,9 @@ recognizer.onresult = (event) => {
addedContent.push( addNewNoteAsPostItNote(speechContent, "0 1.2 -.5") )
// could become jxr code proper later, also allowing to re-execute a command again
}
}
}
recognizer.start();
//------------------------------------------------------------------------------------------------------------------------------
var forceXaxis // NOTE(review): deliberately `var` (global); presumably written/read by code outside this excerpt — confirm before converting to let/const
// setInterval( _ => console.log(forceXaxis), 1000)

@ -0,0 +1,39 @@
// AudioWorklet processor that forwards raw microphone samples to a Vosk
// (Kaldi) recognizer over a MessagePort handed in via an "init" message.
class RecognizerAudioProcessor extends AudioWorkletProcessor {
  constructor(options) {
    super(options);
    this.port.onmessage = (event) => this._processMessage(event);
  }

  // Remembers the recognizer id and the transferred MessagePort once the
  // main thread sends "init"; all subsequent audio flows through that port.
  _processMessage(event) {
    // console.debug(`Received event ${JSON.stringify(event.data, null, 2)}`);
    if (event.data.action !== "init") return;
    this._recognizerId = event.data.recognizerId;
    this._recognizerPort = event.ports[0];
  }

  // Invoked by the audio rendering thread for every render quantum.
  // Returning true keeps the processor alive.
  process(inputs, outputs, parameters) {
    const samples = inputs[0][0];
    if (!this._recognizerPort || !samples) return true;
    // AudioBuffer samples are floats in [-1.0, 1.0] whilst Kaldi expects the
    // signed int16 range [-32768, 32767] — hence the 0x8000 scaling.
    const scaled = samples.map((sample) => sample * 0x8000);
    const chunk = {
      action: "audioChunk",
      data: scaled,
      recognizerId: this._recognizerId,
      sampleRate, // Part of AudioWorkletGlobalScope
    };
    // Transfer the underlying buffer instead of copying it.
    this._recognizerPort.postMessage(chunk, { transfer: [scaled.buffer] });
    return true;
  }
}
registerProcessor('recognizer-processor', RecognizerAudioProcessor)
Loading…
Cancel
Save