/*
 * src/widgets/Transcript.js
 * author: ymh <ymh.work@gmail.com>
 * date: Tue, 22 Oct 2024 07:03:54 +0200
 * changeset: 1076 510fd2a482f4 (parent: 1072 ac1eacb3aa33)
 * "Add Dailymotion Tech and remove unused libs"
 */

/* This widget displays annotations as a transcript */
import Mustache from "mustache";
import jQuery from "jquery";

import transcriptStyles from "./Transcript.module.css";

const Transcript = function (ns) {
  /**
   * Transcript widget: displays annotations (or the cues of a native
   * WebVTT text track) as a clickable text transcript.
   *
   * Config options (see `defaults`):
   *   annotation_type - annotation type to display (default "Caption")
   *   use_vtt_track   - when true, read cues from the native <video>
   *                     element's text track instead of the annotation
   *                     list; only works with the HtmlPlayer widget.
   *
   * @param {Object} ns - player namespace providing Widgets.Widget and
   *   textFieldHtml.
   * @returns {Function} the widget class.
   */
  return class extends ns.Widgets.Widget {
    // The original constructor only forwarded (player, config) to super;
    // the implicit default constructor does exactly that.

    static defaults = {
      annotation_type: "Caption",
      use_vtt_track: false,
    };

    static template = '<div class="Ldt-TranscriptWidget"></div>';

    static annotationTemplate =
      '<span data-begin="{{ begin }}" data-end="{{ end }}" data-id="{{ id }}" class="Ldt-Transcript-Annotation">{{ content }}</span>  ';

    /** Render the widget container and populate it with transcript spans. */
    draw() {
      var _this = this;

      this.renderTemplate();
      var content = this.$.find(".Ldt-TranscriptWidget");

      // NOTE(review): `annotationTemplate` is a *static* class field but is
      // read below through the instance (`_this.annotationTemplate`); this
      // assumes the Widget base class copies statics onto instances —
      // TODO confirm, otherwise use `this.constructor.annotationTemplate`.

      if (this.use_vtt_track) {
        // Use webvtt track. It will only work with native video player.
        var widgets = this.player.widgets.filter(function (w) {
          return w.type === "HtmlPlayer";
        });
        // BUGFIX: filter() always returns an array, which is truthy even
        // when empty, so `if (widgets)` could never reach the else branch
        // and `widgets[0]` crashed instead. Check the length explicitly.
        if (widgets.length > 0) {
          var v = widgets[0].$.find("video")[0];
          // FIXME: allow to specify the used track
          v.addEventListener("loadedmetadata", function () {
            var track = v.textTracks[0];
            var cues = track.cues;
            var i = 1;
            // Give each cue a DOM-addressable id and render it as a span.
            Array.prototype.forEach.call(cues, function (_c) {
              _c.id = "cue" + i;
              var _html = Mustache.render(_this.annotationTemplate, {
                id: _c.id,
                content: _c.text,
                // Cue times are in seconds; data attributes use ms.
                begin: 1000 * _c.startTime,
                end: 1000 * _c.endTime,
              });
              i += 1;
              content.append(jQuery(_html));
            });
            track.addEventListener(
              "cuechange",
              function () {
                var acues = track.activeCues;
                if (acues.length > 0) {
                  // Move the "active" highlight to the current cue(s).
                  _this.$.find(".Ldt-Transcript-Annotation.active").removeClass(
                    "active"
                  );
                  Array.prototype.forEach.call(acues, function (_c) {
                    _this.$.find("#" + _c.id).addClass("active");
                  });
                }
              },
              false
            );
            content.on("click", ".Ldt-Transcript-Annotation", function () {
              // dataset values are strings; convert before seeking.
              _this.media.setCurrentTime(Number(this.dataset.begin));
            });
          });
        } else {
          console.log("cannot find a video object");
        }
      } else {
        // Populate with annotation data (only fetched when actually used).
        this.getWidgetAnnotations().forEach(function (_a) {
          var _html = Mustache.render(_this.annotationTemplate, {
            id: _a.id,
            content: ns.textFieldHtml(_a.title),
            begin: _a.begin.toString(),
            end: _a.end.toString(),
          });
          content.append(jQuery(_html));
        });
      }
    }
  };
};

export { Transcript, transcriptStyles };