window.generateDiff = function (text1, text2, divDiff, divLegend) {
    let wikEdDiff = new WikEdDiff();
    let targetDiv = document.getElementById(divDiff);
    targetDiv.innerHTML = wikEdDiff.diff(text1, text2);
    targetDiv.classList.add('mud-typography-body1', 'improvedDiff');

    let legend = document.getElementById(divLegend);
    legend.innerHTML = `
        <div class="legend mt-2">
            <h3>Legend</h3>
            <ul class="mt-2">
                <li><span class="wikEdDiffMarkRight" title="Moved block" id="wikEdDiffMark999" onmouseover="wikEdDiffBlockHandler(undefined, this, 'mouseover');"></span> Original block position</li>
                <li><span title="+" class="wikEdDiffInsert">Inserted<span class="wikEdDiffSpace"><span class="wikEdDiffSpaceSymbol"></span></span>text<span class="wikEdDiffNewline"></span></span></li>
                <li><span title="− " class="wikEdDiffDelete">Deleted<span class="wikEdDiffSpace"><span class="wikEdDiffSpaceSymbol"></span></span>text<span class="wikEdDiffNewline"></span></span></li>
                <li><span class="wikEdDiffBlockLeft" title="◀" id="wikEdDiffBlock999" onmouseover="wikEdDiffBlockHandler(undefined, this, 'mouseover');">Moved<span class="wikEdDiffSpace"><span class="wikEdDiffSpaceSymbol"></span></span>block<span class="wikEdDiffNewline"></span></span></li>
            </ul>
        </div>
    `;
}
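// Example usage (a minimal sketch; the element ids 'diffOutput' and 'diffLegend' are
// hypothetical placeholders for whatever ids the host page actually uses):
//
//   window.generateDiff('The old text.', 'The new text.', 'diffOutput', 'diffLegend');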
window.clearDiv = function (divName) {
    let targetDiv = document.getElementById(divName);
    targetDiv.innerHTML = '';
}

window.scrollToBottom = function (element) {
    element.scrollIntoView({ behavior: 'smooth', block: 'end', inline: 'nearest' });
}
// Shared audio context for sound effects (the Web Audio API does not register with the Media Session API):
let soundEffectContext = null;

// Cache for decoded sound effect audio buffers:
const soundEffectCache = new Map();

// Track the preload state:
let soundEffectsPreloaded = false;

// Queue system: tracks when the next sound can start playing.
// This prevents sounds from overlapping and getting "swallowed" by the audio system:
let nextAvailablePlayTime = 0;

// Minimum gap between sounds in seconds (small buffer to ensure clean transitions):
const SOUND_GAP_SECONDS = 0.55;

// List of all sound effects used in the app:
const SOUND_EFFECT_PATHS = [
    '/sounds/start_recording.ogg',
    '/sounds/stop_recording.ogg',
    '/sounds/transcription_done.ogg'
];
// Initialize the audio context with low-latency settings.
// Should be called from a user interaction (click, keypress)
// to satisfy browser autoplay policies:
window.initSoundEffects = async function () {
    if (soundEffectContext && soundEffectContext.state !== 'closed') {
        // Already initialized, just ensure it's running:
        if (soundEffectContext.state === 'suspended') {
            await soundEffectContext.resume();
        }

        return;
    }

    try {
        // Create the context with the interactive latency hint for the lowest latency:
        soundEffectContext = new (window.AudioContext || window.webkitAudioContext)({
            latencyHint: 'interactive'
        });

        // Resume immediately (needed for Safari/macOS):
        if (soundEffectContext.state === 'suspended') {
            await soundEffectContext.resume();
        }

        // Reset the queue timing:
        nextAvailablePlayTime = 0;

        //
        // Play a very short silent buffer to "warm up" the audio pipeline.
        // This helps prevent the first real sound from being cut off:
        //
        const silentBuffer = soundEffectContext.createBuffer(1, 1, soundEffectContext.sampleRate);
        const silentSource = soundEffectContext.createBufferSource();
        silentSource.buffer = silentBuffer;
        silentSource.connect(soundEffectContext.destination);
        silentSource.start(0);

        console.log('Sound effects - AudioContext initialized with latency:', soundEffectContext.baseLatency);

        // Preload all sound effects in parallel:
        if (!soundEffectsPreloaded) {
            await window.preloadSoundEffects();
        }
    } catch (error) {
        console.warn('Failed to initialize sound effects:', error);
    }
};
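// Example of wiring the initialization to the first user interaction (a sketch, assuming any
// click is an acceptable trigger; the 'once' option removes the listener after the first call):
//
//   document.addEventListener('click', () => window.initSoundEffects(), { once: true });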
// Preload all sound effect files into the cache:
window.preloadSoundEffects = async function () {
    if (soundEffectsPreloaded) {
        return;
    }

    // Ensure that the context exists:
    if (!soundEffectContext || soundEffectContext.state === 'closed') {
        soundEffectContext = new (window.AudioContext || window.webkitAudioContext)({
            latencyHint: 'interactive'
        });
    }

    console.log('Sound effects - preloading', SOUND_EFFECT_PATHS.length, 'sound files...');
    const preloadPromises = SOUND_EFFECT_PATHS.map(async (soundPath) => {
        try {
            const response = await fetch(soundPath);
            const arrayBuffer = await response.arrayBuffer();
            const audioBuffer = await soundEffectContext.decodeAudioData(arrayBuffer);
            soundEffectCache.set(soundPath, audioBuffer);
            console.log('Sound effects - preloaded:', soundPath, 'duration:', audioBuffer.duration.toFixed(2), 's');
        } catch (error) {
            console.warn('Sound effects - failed to preload:', soundPath, error);
        }
    });

    await Promise.all(preloadPromises);
    soundEffectsPreloaded = true;
    console.log('Sound effects - all files preloaded');
};
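// Note: preloadSoundEffects() can also be called eagerly (e.g., right after page load) so the
// files are already decoded before initSoundEffects() runs; decoding typically works even while
// the context is still suspended:
//
//   window.preloadSoundEffects();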
window.playSound = async function (soundPath) {
    try {
        // Initialize the context if needed (fallback if initSoundEffects wasn't called):
        if (!soundEffectContext || soundEffectContext.state === 'closed') {
            soundEffectContext = new (window.AudioContext || window.webkitAudioContext)({
                latencyHint: 'interactive'
            });

            nextAvailablePlayTime = 0;
        }

        // Resume if suspended (browser autoplay policy):
        if (soundEffectContext.state === 'suspended') {
            await soundEffectContext.resume();
        }

        // Check the cache for already decoded audio:
        let audioBuffer = soundEffectCache.get(soundPath);
        if (!audioBuffer) {
            // Fetch and decode the audio file (fallback if not preloaded):
            console.log('Sound effects - loading on demand:', soundPath);
            const response = await fetch(soundPath);
            const arrayBuffer = await response.arrayBuffer();
            audioBuffer = await soundEffectContext.decodeAudioData(arrayBuffer);
            soundEffectCache.set(soundPath, audioBuffer);
        }

        // Calculate when this sound should start:
        const currentTime = soundEffectContext.currentTime;
        let startTime;
        if (currentTime >= nextAvailablePlayTime) {
            // No sound is playing, or the previous sound has finished; start immediately:
            startTime = 0; // 0 means "now" in the Web Audio API
            nextAvailablePlayTime = currentTime + audioBuffer.duration + SOUND_GAP_SECONDS;
        } else {
            // A sound is still playing; schedule this sound to start after it:
            startTime = nextAvailablePlayTime;
            nextAvailablePlayTime = startTime + audioBuffer.duration + SOUND_GAP_SECONDS;
            console.log('Sound effects - queued:', soundPath, 'will play in', (startTime - currentTime).toFixed(2), 's');
        }

        // Create a new source node and schedule playback:
        const source = soundEffectContext.createBufferSource();
        source.buffer = audioBuffer;
        source.connect(soundEffectContext.destination);
        source.start(startTime);

        console.log('Sound effects - playing:', soundPath);
    } catch (error) {
        console.warn('Failed to play sound effect:', error);
    }
};
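// Example usage (a sketch; any of the SOUND_EFFECT_PATHS entries, or another audio file
// reachable via fetch(), should work as the argument):
//
//   await window.playSound('/sounds/transcription_done.ogg');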
let mediaRecorder;
let actualRecordingMimeType;
let changedMimeType = false;
let pendingChunkUploads = 0;

// Store the media stream so we can close the microphone later:
let activeMediaStream = null;

// Delay in milliseconds to wait after getUserMedia() for the Bluetooth profile switch (A2DP → HFP):
const BLUETOOTH_PROFILE_SWITCH_DELAY_MS = 1_600;
window.audioRecorder = {
    start: async function (dotnetRef, desiredMimeTypes = []) {
        const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
        activeMediaStream = stream;

        // Wait for Bluetooth headsets to complete the profile switch from A2DP to HFP.
        // This prevents the first sound from being cut off during the switch:
        console.log('Audio recording - waiting for Bluetooth profile switch...');
        await new Promise(r => setTimeout(r, BLUETOOTH_PROFILE_SWITCH_DELAY_MS));

        // Play the start recording sound effect:
        await window.playSound('/sounds/start_recording.ogg');

        // When only one mime type is provided as a string, convert it to an array:
        if (typeof desiredMimeTypes === 'string') {
            desiredMimeTypes = [desiredMimeTypes];
        }

        // Log the requested mime types for debugging:
        console.log('Audio recording - requested mime types: ', desiredMimeTypes);
        let mimeTypes = desiredMimeTypes.filter(type => typeof type === 'string' && type.trim() !== '');

        // Ensure that some default mime types are checked as well.
        // In case the provided list does not contain them, we append them:
        const defaultMimeTypes = [
            'audio/webm',
            'audio/ogg',
            'audio/mp4',
            'audio/mpeg',
            '' // Fallback to the browser default
        ];

        defaultMimeTypes.forEach(type => {
            if (!mimeTypes.includes(type)) {
                mimeTypes.push(type);
            }
        });

        console.log('Audio recording - final mime types to check (included defaults): ', mimeTypes);

        // Find the first supported mime type:
        actualRecordingMimeType = mimeTypes.find(type =>
            type === '' || MediaRecorder.isTypeSupported(type)
        ) || '';

        console.log('Audio recording - the browser selected the following mime type for recording: ', actualRecordingMimeType);
        const options = actualRecordingMimeType ? { mimeType: actualRecordingMimeType } : {};
        mediaRecorder = new MediaRecorder(stream, options);

        // In case the browser changed the mime type:
        actualRecordingMimeType = mediaRecorder.mimeType;
        console.log('Audio recording - actual mime type used by the browser: ', actualRecordingMimeType);

        // Check the list of desired mime types against the actual one:
        if (!desiredMimeTypes.includes(actualRecordingMimeType)) {
            changedMimeType = true;
            console.warn(`Audio recording - requested mime types ('${desiredMimeTypes.join(', ')}') do not include the actual mime type used by the browser ('${actualRecordingMimeType}').`);
        } else {
            changedMimeType = false;
        }

        // Reset the pending uploads counter:
        pendingChunkUploads = 0;

        // Stream each chunk directly to .NET as it becomes available:
        mediaRecorder.ondataavailable = async (event) => {
            if (event.data.size > 0) {
                pendingChunkUploads++;
                try {
                    const arrayBuffer = await event.data.arrayBuffer();
                    const uint8Array = new Uint8Array(arrayBuffer);
                    await dotnetRef.invokeMethodAsync('OnAudioChunkReceived', uint8Array);
                } catch (error) {
                    console.error('Error sending audio chunk to .NET:', error);
                } finally {
                    pendingChunkUploads--;
                }
            }
        };

        mediaRecorder.start(3000); // read the recorded data in 3-second chunks
        return actualRecordingMimeType;
    },
    stop: async function () {
        return new Promise((resolve) => {
            // Add an event listener to handle the stop event:
            mediaRecorder.onstop = async () => {
                // Wait for all pending chunk uploads to complete before finalizing:
                console.log(`Audio recording - waiting for ${pendingChunkUploads} pending uploads.`);
                while (pendingChunkUploads > 0) {
                    await new Promise(r => setTimeout(r, 10)); // wait 10 ms before checking again
                }

                console.log('Audio recording - all chunks uploaded, finalizing.');

                // Play the stop recording sound effect:
                await window.playSound('/sounds/stop_recording.ogg');

                //
                // IMPORTANT: Do NOT release the microphone here!
                // Bluetooth headsets switch profiles (HFP → A2DP) when the microphone is released,
                // which causes audio to be interrupted. We keep the microphone open so that the
                // stop_recording and transcription_done sounds can play without interruption.
                //
                // Call window.audioRecorder.releaseMicrophone() after the last sound has played.
                //

                // No need to process data here anymore, just signal completion:
                resolve({
                    mimeType: actualRecordingMimeType,
                    changedMimeType: changedMimeType,
                });
            };

            // Finally, stop the recording (which will actually trigger the onstop event):
            mediaRecorder.stop();
        });
    },
    // Release the microphone after all sounds have been played.
    // This should be called after the transcription_done sound to allow
    // Bluetooth headsets to switch back to the A2DP profile without interrupting audio:
    releaseMicrophone: function () {
        if (activeMediaStream) {
            console.log('Audio recording - releasing microphone (Bluetooth will switch back to A2DP)');
            activeMediaStream.getTracks().forEach(track => track.stop());
            activeMediaStream = null;
        }
    }
};
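// Example call sequence (a sketch; 'dotnetRef' stands for the DotNetObjectReference passed in
// from .NET, and the mime type list is only a preference the browser may override):
//
//   const mimeType = await window.audioRecorder.start(dotnetRef, ['audio/webm', 'audio/ogg']);
//   // ... recording runs; chunks stream to .NET via 'OnAudioChunkReceived' ...
//   const result = await window.audioRecorder.stop();
//   await window.playSound('/sounds/transcription_done.ogg');
//   window.audioRecorder.releaseMicrophone();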