Changeset 35304
- Timestamp:
- 2021-08-17T15:03:14+12:00 (3 years ago)
- Location:
- main/trunk/model-interfaces-dev/atea
- Files:
-
- 3 edited
Legend:
- Unmodified
- Added
- Removed
-
main/trunk/model-interfaces-dev/atea/js/asr/asr-controller.js
r35302 r35304 34 34 /** @type {Boolean} Gets or sets a value indicating if this transcription view has been deleted. */ 35 35 this.isDeleted = false; 36 37 /** @type {{word: String, startTime: Number, endTime: Number, shouldHighlight: Boolean}[]} */ 38 this.words = []; 36 39 } 37 40 } … … 83 86 { 84 87 lastTime = EL_AUDIO_TRANSCRIPTION_AUDIO.currentTime; 85 console.log("time updated! " + lastTime);88 TranscriptionsListVM.currentAudioTime = lastTime; 86 89 } 87 90 … … 189 192 showCharDisplay: false, 190 193 /** @type {{id: String, url: String} | null} Gets the ID of the transcription for which the audio is currently loaded */ 191 currentlyLoadedAudio: null 194 currentlyLoadedAudio: null, 195 currentAudioTime: 0 192 196 } 193 197 }, … … 221 225 this.failures.delete(id); 222 226 }, 223 getWords(transcriptionId)224 {225 /** @type {TranscriptionModel} */226 const transcription = this.transcriptions.get(transcriptionId);227 const words = [];228 229 let lastWord = "";230 let currStartTime = 0;231 232 for (const metadata of transcription.metadata)233 {234 if (metadata.char == ' ')235 {236 lastWord += "\u00A0";237 words.push({ word: lastWord, startTime: currStartTime });238 239 lastWord = "";240 currStartTime = metadata.start_time;241 }242 else243 {244 lastWord += metadata.char;245 }246 }247 248 // Push the last word, as most transcriptions will not end in a space (hence breaking the above algorithm)249 if (lastWord.length > 0) {250 words.push({ word: lastWord, startTime: currStartTime });251 }252 253 return words;254 // console.log(this);255 // console.log(this.$refs);256 // let charsPerLine = Math.floor(this.$refs.wordListContainer.clientWidth / MONOSPACE_CHAR_SIZE);257 258 // return getWidthNormalisedLines(transcription.transcription, charsPerLine);259 },260 227 getChars(transcriptionId) 261 228 { … … 275 242 276 243 return chars; 244 } 245 }, 246 watch: 247 { 248 currentAudioTime(newValue) 249 { 250 // TODO: Replace with more efficient search. 
Even binary search would be a significant improvement for longer transcriptions. 251 const t = this.transcriptions.get(this.currentlyLoadedAudio.id); 252 253 for (let i = 0; i < t.words.length; i++) 254 { 255 const word = t.words[i]; 256 257 if (word.startTime < newValue && word.endTime > newValue) { 258 word.shouldHighlight = true; 259 } 260 else { 261 word.shouldHighlight = false; 262 } 263 } 277 264 } 278 265 } … … 328 315 329 316 let model = new TranscriptionViewModel(t, f); 317 model.words = getTranscriptionWords(t); 330 318 331 319 TranscriptionsListVM.transcriptions.set(model.id, model); … … 342 330 343 331 AudioUploadVM.isTranscribing = false; 332 } 333 334 /** 335 * 336 * @param {TranscriptionModel} transcription The transcription. 337 * @returns {{word: String, startTime: Number, endTime: Number, shouldHighlight: Boolean}[]} 338 */ 339 function getTranscriptionWords(transcription) 340 { 341 /** @type {{word: String, startTime: Number, endTime: Number, shouldHighlight: Boolean}[]} */ 342 const words = []; 343 344 let lastWord = ""; 345 let currStartTime = 0; 346 347 for (const metadata of transcription.metadata) 348 { 349 if (metadata.char == ' ') 350 { 351 lastWord += "\u00A0"; 352 words.push({ word: lastWord, startTime: currStartTime, endTime: metadata.start_time, shouldHighlight: false }); 353 354 lastWord = ""; 355 currStartTime = metadata.start_time; 356 } 357 else 358 { 359 lastWord += metadata.char; 360 } 361 } 362 363 // Push the last word, as most transcriptions will not end in a space (hence breaking the above algorithm) 364 if (lastWord.length > 0) { 365 words.push( 366 { 367 word: lastWord, 368 startTime: currStartTime, 369 endTime: transcription.metadata[transcription.metadata.length].start_time, 370 shouldHighlight: false 371 }); 372 } 373 374 return words; 375 // console.log(this); 376 // console.log(this.$refs); 377 // let charsPerLine = Math.floor(this.$refs.wordListContainer.clientWidth / MONOSPACE_CHAR_SIZE); 378 379 // return 
getWidthNormalisedLines(transcription.transcription, charsPerLine); 344 380 } 345 381 -
main/trunk/model-interfaces-dev/atea/style/asr.scss
r35302 r35304 121 121 122 122 &:hover { 123 background-color: rgba(255, 255, 0, 0.315) 124 } 123 background-color: rgba(255, 255, 0, 0.4) 124 } 125 } 126 127 .yellow { 128 background-color: rgba(255, 255, 0, 0.4); 125 129 } 126 130 -
main/trunk/model-interfaces-dev/atea/transform/pages/asr.xsl
r35302 r35304 126 126 <ul class="transcription__list"> 127 127 <li v-if="!showCharDisplay"> 128 <span v-for="word in getWords(transcription.id)" class="transcription__word" 129 v-on:click="playAudioFile(id, word.startTime)"> 128 <span v-for="word in transcription.words" class="transcription__word" 129 v-on:click="playAudioFile(id, word.startTime)"> 130 <xsl:attribute name="v-bind:class"> 131 <xsl:text disable-output-escaping="yes">{ yellow: word.shouldHighlight }</xsl:text> 132 </xsl:attribute> 133 130 134 {{ word.word }} 131 135 </span> 132 136 </li> 133 137 <li v-if="showCharDisplay"> 134 <span v-for="char in getChars( transcription.id)" class="transcription__word"138 <span v-for="char in getChars(id)" class="transcription__word" 135 139 v-on:click="playAudioFile(id, char.startTime)"> 136 140 {{ char.char }}
Note:
See TracChangeset
for help on using the changeset viewer.