@@ -9,11 +9,12 @@
 <script src='dependencies/webdav.js'></script>
 <script src='jxr-core.js?1234'></script>
 <script src='jxr-postitnote.js?13235'></script>
+<script type="application/javascript" src="https://cdn.jsdelivr.net/npm/vosk-browser@0.0.8/dist/vosk.js"></script>
 </head>
 <body>

 <script>

 //________________________________________________________________
 const endpointDomainOrIP = '192.168.0.129' // note that if the certificate is NOT proper, consider opening it first to accept it on the device
 // e.g. https://hmd.link/?https://192.168.0.129:8443/

@@ -55,23 +56,88 @@ function getAllPrimitives(){
    .map( (i,j) => i )
 } // adapted from https://git.benetou.fr/utopiah/text-code-xr-engine/src/commit/0e1f297ec0cd17b0356811dfa0ab55f1e2629e7c/index.html#L2101

+// should test first
 const SpeechRecognition = window.webkitSpeechRecognition;
-recognizer = new SpeechRecognition();
-recognizer.interimResults = true;
-recognizer.continuous = true;
-// does not work recognizer.lang = 'fr-FR';
-recognizer.lang = 'en-US';
+//(SpeechRecognition) ? console.log('should switch back to native WebSpeech API from speech branch') : console.log('polyfilling WebSpeech API')
+(SpeechRecognition) ? nativeSpeechRecognition( parseSpeech ) : startVoiceRecognition( parseSpeech )
+
+function nativeSpeechRecognition(callbackOnComplete){
+  recognizer = new SpeechRecognition();
+  recognizer.interimResults = true;
+  recognizer.continuous = true;
+  // does not work recognizer.lang = 'fr-FR';
+  recognizer.lang = 'en-US';
+  recognizer.onresult = (event) => {
+    let result = event.results[event.resultIndex]
+    if (result.isFinal) {
+      console.log('You said: ' + result[0].transcript )
+      let speechContent = result[0].transcript
+      callbackOnComplete( speechContent )
+    }
+  }
+}
+// recognizer.start();
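+// note: recognizer.start() stays commented out here and the old top-level call
+// is removed below, so the native recognizer presumably gets started explicitly
+// elsewhere (e.g. on a user interaction granting microphone access)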
+
+async function startVoiceRecognition( callbackOnComplete ) {
+  /* requires
+     recognizer-processor.js
+     https://fabien.benetou.fr/pub/home/future_of_text_demo/engine/vosk-browser/vosk-model-small-en-us-0.15.tar.gz
+     from https://github.com/ccoreilly/vosk-browser/tree/master/examples/modern-vanilla
+  */
+  const channel = new MessageChannel();
+  // const model = await Vosk.createModel('model.tar.gz');
+  const model = await Vosk.createModel('https://fabien.benetou.fr/pub/home/future_of_text_demo/engine/vosk-browser/vosk-model-small-en-us-0.15.tar.gz');
+  model.registerPort(channel.port1);
+
+  const sampleRate = 48000;
+
+  const recognizer = new model.KaldiRecognizer(sampleRate);
+  recognizer.setWords(true);
+
+  recognizer.on("result", (message) => {
+    const result = message.result;
+    if (result) {
+      console.log(JSON.stringify(result, null, 2));
+      callbackOnComplete( result.text )
+    }
+  });
+  recognizer.on("partialresult", (message) => {
+    const partial = message.result.partial;
+    if (partial) console.log(partial)
+  });
+
+  const mediaStream = await navigator.mediaDevices.getUserMedia({
+    video: false,
+    audio: {
+      echoCancellation: true,
+      noiseSuppression: true,
+      channelCount: 1,
+      sampleRate
+    },
+  });
+
+  const audioContext = new AudioContext();
+  await audioContext.audioWorklet.addModule('recognizer-processor.js')
+  const recognizerProcessor = new AudioWorkletNode(audioContext, 'recognizer-processor', { channelCount: 1, numberOfInputs: 1, numberOfOutputs: 1 });
+  recognizerProcessor.port.postMessage({action: 'init', recognizerId: recognizer.id}, [ channel.port2 ])
+  recognizerProcessor.connect(audioContext.destination);
+
+  const source = audioContext.createMediaStreamSource(mediaStream);
+  source.connect(recognizerProcessor);
+}
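+
+/* for reference, recognizer-processor.js (loaded above via audioWorklet.addModule)
+   comes from the vosk-browser modern-vanilla example linked in the comment above;
+   roughly, the worklet forwards mic samples to the recognizer over the
+   MessageChannel port (a sketch, following that example's names and messages):
+
+   class RecognizerAudioProcessor extends AudioWorkletProcessor {
+     constructor(options) {
+       super(options)
+       this.port.onmessage = this._processMessage.bind(this)
+     }
+     _processMessage(event) {
+       if (event.data.action === 'init') {
+         this._recognizerId = event.data.recognizerId
+         this._recognizerPort = event.ports[0]
+       }
+     }
+     process(inputs, outputs, parameters) {
+       const data = inputs[0][0]
+       if (this._recognizerPort && data) {
+         // Kaldi expects int16-range samples rather than WebAudio's -1..1 floats
+         const audioArray = data.map((value) => value * 0x8000)
+         this._recognizerPort.postMessage(
+           { action: 'audioChunk', data: audioArray, recognizerId: this._recognizerId, sampleRate },
+           { transfer: [audioArray.buffer] }
+         )
+       }
+       return true
+     }
+   }
+   registerProcessor('recognizer-processor', RecognizerAudioProcessor)
+*/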

 const aframeprimitives = getAllPrimitives()
 const speechactions = [ "add", "apply", "delete", "clone", "model", "undo" ]
 const speechcustomcomponents = [ "target", "teleporter" ]

-recognizer.onresult = (event) => {
-  let result = event.results[event.resultIndex]
-  if (result.isFinal) {
-    console.log('You said: ' + result[0].transcript )
-    let speechContent = result[0].transcript
+function parseSpeech( speechContent ) {
   let latest = addedContent.at(-1)
   let cmd_words = speechContent.split(" ").map( i => i.toLowerCase() )
   let el

@@ -145,9 +211,9 @@ recognizer.onresult = (event) => {
 addedContent.push( addNewNoteAsPostItNote(speechContent, "0 1.2 -.5") )
 // could become jxr code proper later, also allowing to re-execute a command again
 }
-}
 }
-recognizer.start();
+
+//------------------------------------------------------------------------------------------------------------------------------

 var forceXaxis
 // setInterval( _ => console.log(forceXaxis), 1000)
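
// example flow (inferred, not part of the diff): a final transcript such as
// "add box" reaches parseSpeech via callbackOnComplete on either path
// (result[0].transcript natively, result.text with vosk); parseSpeech then
// lower-cases and splits it into cmd_words, presumably to match against
// speechactions, aframeprimitives and speechcustomcomponents.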