add utilities to manipulate ldt and zoom chats
author ymh <ymh.work@gmail.com>
Wed, 29 Sep 2021 11:03:46 +0200
changeset 1542 82b5f22448f6
parent 1541 61423ca4e0af
child 1543 808ef2076edc
add utilities to manipulate ldt and zoom chats
script/utils/create_chap_from_csv.py
script/utils/export_chat_zoom.py
script/utils/export_chat_zoom_cloud.py
script/utils/parse_timecode.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/script/utils/create_chap_from_csv.py	Wed Sep 29 11:03:46 2021 +0200
@@ -0,0 +1,295 @@
+#!/usr/bin/env python
+# coding=utf-8
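+#
+# Read a chaptering CSV file (';'-separated, with columns START, END, TITLE,
+# DESCRIPTION, TAGS and COLOR, begin/end times in milliseconds as produced
+# e.g. by parse_timecode.py) and inject its rows as a "chap_" ensemble into
+# an ldt/iri project, either a local file or a project fetched from and
+# written back to the LDT platform REST API.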
+
+import argparse
+import datetime
+import json
+import os.path
+import re
+import sys
+import uuid  # @UnresolvedImport
+import csv
+from dateutil.parser import parse
+
+import requests
+
+import dateutil.tz
+from iri_tweet.utils import get_logger, set_logging, set_logging_options
+from lxml import etree
+
+LDT_CONTENT_REST_API_PATH = "api/ldt/1.0/contents/"
+LDT_PROJECT_REST_API_PATH = "api/ldt/1.0/projects/"
+
+def get_options():
+
+    parser = argparse.ArgumentParser(description="All dates should be given in ISO 8601 format.")
+
+    parser.add_argument("-f", "--file", dest="filename",
+                      help="write export to file", metavar="FILE", default="project.ldt")
+    parser.add_argument("-d", "--csv-file", dest="csv_file",
+                      help="Input chapters CSV file", metavar="CSV_FILE")
+    parser.add_argument("-I", "--content-file", dest="content_file",
+                      help="Content file", metavar="CONTENT_FILE")
+    parser.add_argument("-c", "--content", dest="content",
+                      help="Content url", metavar="CONTENT")
+    parser.add_argument("-V", "--video-url", dest="video",
+                      help="video url", metavar="VIDEO")
+    parser.add_argument("-i", "--content-id", dest="content_id",
+                      help="Content id", metavar="CONTENT_ID")
+    parser.add_argument("-C", "--color", dest="color",
+                      help="Color code", metavar="COLOR", default="16763904")
+    parser.add_argument("-n", "--name", dest="name",
+                      help="Cutting name", metavar="NAME", default="chap")
+    parser.add_argument("-R", "--replace", dest="replace", action="store_true",
+                      help="Replace ensemble", default=False)
+    parser.add_argument("-m", "--merge", dest="merge", action="store_true",
+                      help="merge ensemble, choose the first ensemble", default=False)
+    parser.add_argument("-b", "--base-url", dest="base_url",
+                      help="base URL of the platform", metavar="BASE_URL", default="http://ldt.iri.centrepompidou.fr/ldtplatform/")
+    parser.add_argument("-p", "--project", dest="project_id",
+                      help="Project id", metavar="PROJECT_ID", default=None)
+    parser.add_argument("-P", "--post-param", dest="post_param",
+                      help="Post param", metavar="POST_PARAM", default=None)
+
+    set_logging_options(parser)
+
+    return (parser.parse_args(), parser)
+
+
+if __name__ == "__main__" :
+
+    (options, parser) = get_options()
+
+    set_logging(options)
+
+    get_logger().debug("OPTIONS : " + repr(options)) #@UndefinedVariable
+
+
+    if len(sys.argv) == 1 or options.csv_file is None or not options.csv_file.strip():
+        parser.print_help()
+        sys.exit(1)
+
+    if options.project_id:
+        content_file = options.base_url + LDT_PROJECT_REST_API_PATH + options.project_id + "/?format=json"
+    else:
+        content_file = options.content_file
+    parameters = [{
+        # 'start_date': options.start_date,
+        # 'end_date' : options.end_date,
+        # 'duration' : options.duration,
+        'content_file' : content_file,
+        'content_file_write' : content_file,
+        # 'hashtags' : options.hashtag,
+        'project_id' : options.project_id
+    }]
+    post_param = {}
+    if options.post_param:
+        post_param = json.loads(options.post_param)
+
+    display_content_node = None
+    for params in parameters:
+
+        get_logger().debug("PARAMETERS " + repr(params)) #@UndefinedVariable
+
+        content_file = params.get("content_file", None)
+        content_file_write = params.get("content_file_write", None)
+
+        root = None
+        ensemble_parent = None
+        project = None
+
+        # TODO: analyse the situation: ldt or iri file? filename set or not?
+
+        if content_file and content_file.find("http") == 0:
+
+            get_logger().debug("url : " + content_file) #@UndefinedVariable
+
+            r = requests.get(content_file, params=post_param)
+            get_logger().debug("url response " + repr(r) + " content " + repr(r.text)) #@UndefinedVariable
+            project = r.json()
+            text_match = re.match(r"\<\?\s*xml.*?\?\>(.*)", project['ldt'], re.I|re.S)
+            root = etree.fromstring(text_match.group(1) if text_match else project['ldt'])
+
+        elif content_file and os.path.exists(content_file):
+
+            doc = etree.parse(content_file)
+            root = doc.getroot()
+            for child in root:
+                if child.tag == "project":
+                    project = child
+                    break
+            if project is None:
+                root = None
+
+        content_id = None
+
+        if root is None:
+
+            root = etree.Element("iri")
+
+            project = etree.SubElement(root, "project", {"abstract":"Chapitrage","title":"Chapitrage", "user":"IRI Web", "id":str(uuid.uuid4())})
+
+            medias = etree.SubElement(root, "medias")
+            media = etree.SubElement(medias, "media", {"pict":"", "src":options.content, "video":options.video, "id":options.content_id, "extra":""})
+
+            annotations = etree.SubElement(root, "annotations")
+            content = etree.SubElement(annotations, "content", {"id":options.content_id})
+            ensemble_parent = content
+
+            content_id = options.content_id
+
+
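+        # Determine whether the loaded document is an ldt project (the root
+        # has a <project> child) or an iri content file (the root has a
+        # <head> child), and locate the node that will receive the ensemble.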
+        if ensemble_parent is None:
+            file_type = None
+            for node in root:
+                if node.tag == "project":
+                    file_type = "ldt"
+                    break
+                elif node.tag == "head":
+                    file_type = "iri"
+                    break
+
+            if file_type == "ldt":
+                media_nodes = root.xpath("//media")
+                media = None
+                if len(media_nodes) > 0:
+                    media = media_nodes[0]
+                annotations_node = root.find("annotations")
+                if annotations_node is None:
+                    annotations_node = etree.SubElement(root, "annotations")
+                content_node = annotations_node.find("content")
+                if content_node is None and media is not None:
+                    content_node = etree.SubElement(annotations_node,"content", id=media.get("id"))
+                ensemble_parent = content_node
+                content_id = content_node.get("id")
+                display_nodes = root.xpath("//displays/display/content[@id='%s']" % content_id)
+                if len(display_nodes) == 0:
+                    get_logger().info("No display node found. Will not update display")
+                    display_content_node = None
+                else:
+                    display_content_node = display_nodes[0]
+
+            elif file_type == "iri":
+                body_node = root.find("body")
+                if body_node is None:
+                    body_node = etree.SubElement(root, "body")
+                ensembles_node = body_node.find("ensembles")
+                if ensembles_node is None:
+                    ensembles_node = etree.SubElement(body_node, "ensembles")
+                ensemble_parent = ensembles_node
+                content_id = root.xpath("head/meta[@name='id']/@content")[0]
+                display_content_node = None
+
+
+        if ensemble_parent is None:
+            get_logger().error("Cannot process file") #@UndefinedVariable
+            sys.exit(1)
+
+        if options.replace:
+            for ens in ensemble_parent.iterchildren(tag="ensemble"):
+                ens_id = ens.get("id","")
+                if ens_id.startswith("chap_"):
+                    ensemble_parent.remove(ens)
+                    # remove in display nodes
+                    if display_content_node is not None:
+                        for cut_display in display_content_node.iterchildren():
+                            if cut_display.get('idens','') == ens_id:
+                                display_content_node.remove(cut_display)
+
+        ensemble = None
+        elements = None
+        decoupage = None
+
+        if options.merge:
+            for ens in ensemble_parent.findall("ensemble"):
+                if ens.get('id',"").startswith("chap_"):
+                    ensemble = ens
+                    break
+            if ensemble is not None:
+                elements = ensemble.find(".//elements")
+                decoupage = ensemble.find("decoupage")
+
+        if ensemble is None or elements is None:
+            ensemble = etree.SubElement(ensemble_parent, "ensemble", {"id":"chap_" + str(uuid.uuid4()), "title":"Ensemble Chapitrage", "author":"IRI Web", "abstract":"Ensemble Chapitrage"})
+            decoupage = etree.SubElement(ensemble, "decoupage", {"id": str(uuid.uuid4()), "author": "IRI Web"})
+
+            etree.SubElement(decoupage, "title").text = options.name
+            etree.SubElement(decoupage, "abstract").text = options.name
+
+            elements = etree.SubElement(decoupage, "elements")
+
+        ensemble_id = ensemble.get('id', '')
+        decoupage_id = decoupage.get('id', '') if decoupage is not None else None
+
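+        # Each CSV row becomes an <element>: begin comes from START, dur from
+        # END-START, the color from the COLOR column ("#RRGGBB" hex) when
+        # present, otherwise from --color, and the original row is stored as
+        # JSON in a <source> node under the element <meta>.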
+        with open(options.csv_file.strip()) as csvfilein:
+            chap_reader = csv.DictReader(csvfilein, delimiter=';')
+            for i,chap_row in enumerate(chap_reader):
+
+                ts_start = chap_row['START']
+                dur = int(chap_row['END'])-int(ts_start)
+                username = "IRI"
+                color = "%s"%(int(chap_row['COLOR'].strip("#").lower(),16)) if chap_row['COLOR'] else options.color
+                title = chap_row['TITLE']
+                desc = chap_row['DESCRIPTION']
+                tags = chap_row['TAGS']
+
+                element = etree.SubElement(elements, "element" , {"id": "%s-%s" % (uuid.uuid4(),i), "color":color, "author":username, "date":datetime.datetime.now().strftime("%Y/%m/%d"), "begin": ts_start, "dur":str(dur), "src":"manual"})
+                etree.SubElement(element, "title").text = title[:255]
+                etree.SubElement(element, "abstract").text = desc
+
+                tags_node = etree.SubElement(element, "tags")
+
+                for tag in tags.split(","):
+                    etree.SubElement(tags_node,"tag").text = tag.strip()
+
+                meta_element = etree.SubElement(element, 'meta')
+
+                etree.SubElement(meta_element, "source", attrib={"url":"http://www.iri.centrepompidou.fr", "mimetype":"text/plain"}).text = etree.CDATA(json.dumps({'row': chap_row}))
+
+        # sort elements by begin timecode
+        if options.merge:
+            # take all elements out, sort them by begin timecode and put them back
+            elements[:] = sorted(elements, key=lambda n: int(n.get('begin')))
+
+        #add to display node
+        if display_content_node is not None:
+            display_dec = None
+            for dec in display_content_node.iterchildren(tag="decoupage"):
+                if dec.get('idens','') == ensemble_id and dec.get('id', '') == decoupage_id:
+                    display_dec = dec
+                    break
+            if display_dec is None and ensemble_id and decoupage_id:
+                etree.SubElement(display_content_node, "decoupage", attrib={'idens': ensemble_id, 'id': decoupage_id, 'tagsSelect':''})
+
+        output_data = etree.tostring(root, encoding="utf-8", method="xml", pretty_print=False, xml_declaration=True).decode('utf-8')
+
+        if content_file_write and content_file_write.find("http") == 0:
+
+            project["ldt"] = output_data
+            project['owner'] = project['owner'].replace('%7E','~')
+            project['contents'] = [c_url.replace('%7E','~') for c_url in project['contents']]
+
+            post_param = {}
+            if options.post_param:
+                post_param = json.loads(options.post_param)
+
+            get_logger().debug("write http " + content_file_write) #@UndefinedVariable
+            get_logger().debug("write http " + repr(post_param)) #@UndefinedVariable
+            get_logger().debug("write http " + repr(project)) #@UndefinedVariable
+            r = requests.put(content_file_write, data=json.dumps(project), headers={'content-type':'application/json'}, params=post_param)
+            get_logger().debug("write http " + repr(r) + " content " + r.text) #@UndefinedVariable
+            if r.status_code != requests.codes.ok:  # pylint: disable=E1101
+                r.raise_for_status()
+        else:
+            if content_file_write and os.path.exists(content_file_write):
+                dest_file_name = content_file_write
+            else:
+                dest_file_name = options.filename
+
+            get_logger().debug("WRITE : " + dest_file_name) #@UndefinedVariable
+            with open(dest_file_name, "w", encoding="utf-8") as output:
+                output.write(output_data)
--- a/script/utils/export_chat_zoom.py	Wed Jan 13 10:40:41 2021 +0100
+++ b/script/utils/export_chat_zoom.py	Wed Sep 29 11:03:46 2021 +0200
@@ -18,12 +18,6 @@
 from iri_tweet.utils import get_logger, set_logging, set_logging_options
 from lxml import etree
 
-#class TweetExclude(object):
-#    def __init__(self, id):
-#        self.id = id
-#
-#    def __repr__(self):
-#        return "<TweetExclude(id=%d)>" % (self.id)
 
 LDT_CONTENT_REST_API_PATH = "api/ldt/1.0/contents/"
 LDT_PROJECT_REST_API_PATH = "api/ldt/1.0/projects/"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/script/utils/export_chat_zoom_cloud.py	Wed Sep 29 11:03:46 2021 +0200
@@ -0,0 +1,477 @@
+#!/usr/bin/env python
+# coding=utf-8
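+#
+# Export a Zoom chat transcript (one "HH:MM:SS<TAB>sender: text" entry per
+# message, as saved with Zoom cloud recordings) as a "chat_" ensemble of an
+# ldt/iri project, turning each message into an annotation and its
+# ++/!!/??/== style markers into polemic codes.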
+
+import argparse
+import bisect
+import datetime
+import json
+import os.path
+import re
+import sys
+import uuid  # @UnresolvedImport
+
+import requests
+
+from dateutil.parser import parse as parse_date
+from iri_tweet.utils import get_logger, set_logging, set_logging_options
+from lxml import etree
+
+
+LDT_CONTENT_REST_API_PATH = "api/ldt/1.0/contents/"
+LDT_PROJECT_REST_API_PATH = "api/ldt/1.0/projects/"
+
+
+def re_fn(expr, item):
+    reg = re.compile(expr, re.I)
+    res = reg.search(item)
+    if res:
+        get_logger().debug("re_fn : " + repr(expr) + "~" + repr(item)) #@UndefinedVariable
+    return res is not None
+
+def parse_polemics_1(tw_text, extended_mode):
+    """
+    Parse polemic markers in the text and return a list of polemic codes, or None if no marker is found.
+    """
+    polemics = {}
+    for m in re.finditer(r"(\+\+|\-\-|\?\?|\=\=)",tw_text):
+        pol_link = {
+            '++' : 'OK',
+            '--' : 'KO',
+            '??' : 'Q',
+            '==' : 'REF'}[m.group(1)]
+        polemics[pol_link] = pol_link
+
+    if extended_mode:
+        if "?" in tw_text:
+            polemics["Q"] = "Q"
+
+    if len(polemics) > 0:
+        return polemics.keys()
+    else:
+        return None
+
+def parse_polemics_2(tw_text, extended_mode):
+    """
+    Parse polemic markers in the text and return a list of polemic codes, or None if no marker is found.
+    """
+    polemics = {}
+    for m in re.finditer(r"(\+\+|\!\!|\?\?|\=\=)",tw_text):
+        pol_link = {
+            '++' : 'OK',
+            '!!' : 'KO',
+            '??' : 'Q',
+            '==' : 'REF'}[m.group(1)]
+        polemics[pol_link] = pol_link
+
+    if extended_mode:
+        if "?" in tw_text:
+            polemics["Q"] = "Q"
+
+
+    if len(polemics) > 0:
+        return polemics.keys()
+    else:
+        return None
+
+def parse_polemics_3(tw_text, extended_mode):
+    """
+    Parse polemic markers in the text and return a list of polemic codes, or None if no marker is found.
+    """
+    polemics = {}
+    for m in re.finditer(r"(\+\+|\?\?|\*\*|\=\=)",tw_text):
+        pol_link = {
+            '++' : 'OK',
+            '??' : 'KO',
+            '**' : 'REF',
+            '==' : 'Q'}[m.group(1)]
+        polemics[pol_link] = pol_link
+
+    if len(polemics) > 0:
+        return polemics.keys()
+    else:
+        return None
+
+
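+# Map of --annotation-protocol values to the marker conventions implemented
+# above (e.g. version 2 uses ++ / !! / ?? / == for OK / KO / Q / REF).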
+protocol_version_map = {
+    "1" : parse_polemics_1,
+    "2" : parse_polemics_2,
+    "3" : parse_polemics_3
+}
+
+def get_options():
+
+    parser = argparse.ArgumentParser(description="All dates should be given in ISO 8601 format. If no timezone is given, the date is considered to be UTC.")
+
+    parser.add_argument("-f", "--file", dest="filename",
+                      help="write export to file", metavar="FILE", default="project.ldt")
+    parser.add_argument("-d", "--chat-database", dest="database",
+                      help="Input Zoom chat file", metavar="CHAT_DATABASE")
+    parser.add_argument("-s", "--start-date", dest="start_date",
+                      help="start date", metavar="START_DATE", default=None)
+    parser.add_argument("-a", "--annotation-protocol", dest="protocol_version",
+                      help="annotation protocol version", metavar="PROTOCOL_VERSION",
+                      default="2")
+    parser.add_argument("-I", "--content-file", dest="content_file",
+                      help="Content file", metavar="CONTENT_FILE")
+    parser.add_argument("-c", "--content", dest="content",
+                      help="Content url", metavar="CONTENT")
+    parser.add_argument("-V", "--video-url", dest="video",
+                      help="video url", metavar="VIDEO")
+    parser.add_argument("-i", "--content-id", dest="content_id",
+                      help="Content id", metavar="CONTENT_ID")
+    parser.add_argument("-C", "--color", dest="color",
+                      help="Color code", metavar="COLOR", default="16763904")
+    parser.add_argument("-D", "--duration", dest="duration", type=int,
+                      help="Duration", metavar="DURATION", default=None)
+    parser.add_argument("-n", "--name", dest="name",
+                      help="Cutting name", metavar="NAME", default="Chats")
+    parser.add_argument("-R", "--replace", dest="replace", action="store_true",
+                      help="Replace chat ensemble", default=False)
+    parser.add_argument("-m", "--merge", dest="merge", action="store_true",
+                      help="merge chat ensemble, choose the first ensemble", default=False)
+    parser.add_argument("-E", "--extended", dest="extended_mode", action="store_true",
+                      help="Trigger polemic extended mode", default=False)
+    parser.add_argument("-b", "--base-url", dest="base_url",
+                      help="base URL of the platform", metavar="BASE_URL", default="http://ldt.iri.centrepompidou.fr/ldtplatform/")
+    parser.add_argument("-p", "--project", dest="project_id",
+                      help="Project id", metavar="PROJECT_ID", default=None)
+    parser.add_argument("-P", "--post-param", dest="post_param",
+                      help="Post param", metavar="POST_PARAM", default=None)
+    parser.add_argument("--user-whitelist", dest="user_whitelist", action="store",
+                      help="A file containing a list of user screen names, one per line", metavar="USER_WHITELIST", default=None)
+    parser.add_argument("--cut", dest="cuts", action="append",
+                      help="A cut with the format <ts in ms>::<duration>", metavar="CUT", default=[])
+
+    set_logging_options(parser)
+
+    return (parser.parse_args(), parser)
+
+
+def find_delta(deltas, ts):
+    i = bisect.bisect_right(deltas, (ts+1,0))
+    if i:
+        return deltas[i-1]
+    return (0,0)
+
+
+def parse_duration(s):
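+    """
+    Parse a duration given either as an integer number of milliseconds or as
+    an "HH:MM[:SS]" timecode, and return it as an integer number of milliseconds.
+    """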
+    try:
+        return int(s)
+    except ValueError:
+        parts = s.split(":")
+        if len(parts) < 2:
+            raise ValueError("Bad duration format")
+        time_params = {
+            'hours': int(parts[0]),
+            'minutes': int(parts[1]),
+            'seconds': int(parts[2]) if len(parts)>2 else 0
+        }
+        return int(round(datetime.timedelta(**time_params).total_seconds()*1000))
+
+CHAT_REGEXP = re.compile(r"^(?P<created_at>\d{2}:\d{2}:\d{2})\t(?P<user>.+?)\s?:\s(?P<text>.*)$", re.DOTALL)
+CHAT_LINE_REGEXP = re.compile(r"^\d{2}:\d{2}:\d{2}\t.+?:\s")
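+# Each chat entry starts with an "HH:MM:SS" timestamp, a tab, a sender field
+# and a colon; CHAT_LINE_REGEXP detects the start of a new entry and
+# CHAT_REGEXP splits it into timestamp, user and text.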
+
+def parse_chat_line(chat_id, chat_line):
+    if (m := CHAT_REGEXP.match(chat_line)) is not None:
+        res = {k: v.replace('\r','\n') if k == 'text' else v for k,v in m.groupdict().items()}
+        res['id'] = chat_id
+        res['tags'] = re.findall(r'#(\w+)', res['text'])
+        return res
+    else:
+        return {}
+
+def read_chat_file(chat_file_path):
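+    """
+    Read a Zoom chat export and return one string per message, re-attaching
+    continuation lines (lines that do not start with a timestamp) to the
+    previous message.
+    """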
+    current_line = ""
+    chat_content = []
+    with open(chat_file_path, "r") as chat_file:
+        for chat_line in chat_file:
+            if CHAT_LINE_REGEXP.match(chat_line) is not None:
+                if current_line:
+                    chat_content.append(current_line)
+                current_line = chat_line
+            else:
+                current_line = current_line + "\n" + chat_line
+    if current_line:
+        chat_content.append(current_line)
+    return chat_content
+
+
+if __name__ == "__main__" :
+
+    (options, parser) = get_options()
+
+    set_logging(options)
+
+    get_logger().debug("OPTIONS : " + repr(options)) #@UndefinedVariable
+
+
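+    # Build a sorted (timestamp, offset) table from the --cut options: chat
+    # messages that fall inside a cut are dropped, later ones are shifted
+    # back by the accumulated cut duration (see find_delta above).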
+    deltas = [(0,0)]
+    total_delta = 0
+    if options.cuts:
+        cuts_raw = sorted([tuple([parse_duration(s) for s in c.split("::")]) for c in options.cuts])
+        for c, d in cuts_raw:
+            deltas.append((c+total_delta, -1))
+            total_delta += d
+            deltas.append((c+total_delta, total_delta))
+
+    if len(sys.argv) == 1 or options.database is None:
+        parser.print_help()
+        sys.exit(1)
+
+    user_whitelist_file = options.user_whitelist
+    user_whitelist = None
+
+    if options.project_id:
+        content_file = options.base_url + LDT_PROJECT_REST_API_PATH + options.project_id + "/?format=json"
+    else:
+        content_file = options.content_file
+
+    params = {
+        'start_date': options.start_date,
+        'duration' : options.duration,
+        'content_file' : content_file,
+        'content_file_write' : content_file,
+        'project_id' : options.project_id
+    }
+    post_param = {}
+
+    if options.post_param:
+        post_param = json.loads(options.post_param)
+
+    display_content_node = None
+
+    get_logger().debug("PARAMETERS " + repr(params)) #@UndefinedVariable
+
+    start_date_str = params.get("start_date",None)
+    duration = params.get("duration", None)
+    content_file = params.get("content_file", None)
+    content_file_write = params.get("content_file_write", None)
+    if user_whitelist_file:
+        with open(user_whitelist_file, 'r') as f:
+            user_whitelist = list(set([s.strip() for s in f]))
+
+    start_date = datetime.datetime.now()
+    if start_date_str:
+        start_date = parse_date(start_date_str)
+
+    root = None
+    ensemble_parent = None
+    project = None
+
+    # TODO: analyse the situation: ldt or iri file? filename set or not?
+
+    if content_file and content_file.find("http") == 0:
+
+        get_logger().debug("url : " + content_file) #@UndefinedVariable
+
+        r = requests.get(content_file, params=post_param)
+        get_logger().debug("url response " + repr(r) + " content " + repr(r.text)) #@UndefinedVariable
+        project = r.json()
+        text_match = re.match(r"\<\?\s*xml.*?\?\>(.*)", project['ldt'], re.I|re.S)
+        root = etree.fromstring(text_match.group(1) if text_match else project['ldt'])
+
+    elif content_file and os.path.exists(content_file):
+
+        doc = etree.parse(content_file)
+        root = doc.getroot()
+        for child in root:
+            if child.tag == "project":
+                project = child
+                break
+        if project is None:
+            root = None
+
+    content_id = None
+
+    if root is None:
+
+        root = etree.Element("iri")
+
+        project = etree.SubElement(root, "project", {"abstract":"Polemics Chat","title":"Polemic Chat", "user":"IRI Web", "id":str(uuid.uuid4())})
+
+        medias = etree.SubElement(root, "medias")
+        media = etree.SubElement(medias, "media", {"pict":"", "src":options.content, "video":options.video, "id":options.content_id, "extra":""})
+
+        annotations = etree.SubElement(root, "annotations")
+        content = etree.SubElement(annotations, "content", {"id":options.content_id})
+        ensemble_parent = content
+
+        content_id = options.content_id
+
+
+    if ensemble_parent is None:
+        file_type = None
+        for node in root:
+            if node.tag == "project":
+                file_type = "ldt"
+                break
+            elif node.tag == "head":
+                file_type = "iri"
+                break
+
+        if file_type == "ldt":
+            media_nodes = root.xpath("//media")
+            media = None
+            if len(media_nodes) > 0:
+                media = media_nodes[0]
+            annotations_node = root.find("annotations")
+            if annotations_node is None:
+                annotations_node = etree.SubElement(root, "annotations")
+            content_node = annotations_node.find("content")
+            if content_node is None and media is not None:
+                content_node = etree.SubElement(annotations_node,"content", id=media.get("id"))
+            ensemble_parent = content_node
+            content_id = content_node.get("id")
+            display_nodes = root.xpath("//displays/display/content[@id='%s']" % content_id)
+            if len(display_nodes) == 0:
+                get_logger().info("No display node found. Will not update display")
+                display_content_node = None
+            else:
+                display_content_node = display_nodes[0]
+
+        elif file_type == "iri":
+            body_node = root.find("body")
+            if body_node is None:
+                body_node = etree.SubElement(root, "body")
+            ensembles_node = body_node.find("ensembles")
+            if ensembles_node is None:
+                ensembles_node = etree.SubElement(body_node, "ensembles")
+            ensemble_parent = ensembles_node
+            content_id = root.xpath("head/meta[@name='id']/@content")[0]
+            display_content_node = None
+
+
+    if ensemble_parent is None:
+        get_logger().error("Cannot process file") #@UndefinedVariable
+        sys.exit(1)
+
+    if options.replace:
+        for ens in ensemble_parent.iterchildren(tag="ensemble"):
+            ens_id = ens.get("id","")
+            if ens_id.startswith("chat_"):
+                ensemble_parent.remove(ens)
+                # remove in display nodes
+                if display_content_node is not None:
+                    for cut_display in display_content_node.iterchildren():
+                        if cut_display.get('idens','') == ens_id:
+                            display_content_node.remove(cut_display)
+
+    ensemble = None
+    elements = None
+    decoupage = None
+
+    if options.merge:
+        for ens in ensemble_parent.findall("ensemble"):
+            if ens.get('id',"").startswith("chat_"):
+                ensemble = ens
+                break
+        if ensemble is not None:
+            elements = ensemble.find(".//elements")
+            decoupage = ensemble.find("decoupage")
+
+    if ensemble is None or elements is None:
+        ensemble = etree.SubElement(ensemble_parent, "ensemble", {"id":"chat_" + str(uuid.uuid4()), "title":"Ensemble Chat", "author":"IRI Web", "abstract":"Ensemble Chat"})
+        decoupage = etree.SubElement(ensemble, "decoupage", {"id": str(uuid.uuid4()), "author": "IRI Web"})
+
+        etree.SubElement(decoupage, "title").text = options.name
+        etree.SubElement(decoupage, "abstract").text = options.name
+
+        elements = etree.SubElement(decoupage, "elements")
+
+    ensemble_id = ensemble.get('id', '')
+    decoupage_id = decoupage.get('id', '') if decoupage is not None else None
+
+    if not duration and options.base_url:
+        content_url = options.base_url + LDT_CONTENT_REST_API_PATH + content_id + "/?format=json"
+        r = requests.get(content_url)
+        duration = int(r.json()['duration'])
+        get_logger().debug("get duration " + content_url) #@UndefinedVariable
+        get_logger().debug("get duration " + repr(duration)) #@UndefinedVariable
+
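+    # Each chat message becomes an <element> of the ensemble: begin is the
+    # message time relative to the start of the recording (adjusted by the
+    # cuts), the #hashtags become tags and the polemic markers are stored
+    # under the element <meta>.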
+    chat_content_lines = read_chat_file(options.database.strip())
+    for i,chat_line in enumerate(chat_content_lines):
+
+        cht = parse_chat_line("%04d" % (i+1) ,chat_line.strip())
+
+        #TODO parse chat line
+        cht_ts_dt = cht['created_at']
+        cht_ts_rel_milli = parse_duration(cht_ts_dt)
+        element_date = start_date + datetime.timedelta(milliseconds=cht_ts_rel_milli)
+        if deltas:
+            d = find_delta(deltas, cht_ts_rel_milli)
+            if d[1] < 0:
+                continue
+            else :
+                cht_ts_rel_milli -= d[1]
+
+        username = cht['user'] or "anon."
+
+        element = etree.SubElement(elements, "element" , {"id": "%s-%s" % (uuid.uuid4(),cht['id']), "color":options.color, "author":username, "date":element_date.strftime("%Y/%m/%d"), "begin": str(cht_ts_rel_milli), "dur":"0", "src":"zoom"})
+        etree.SubElement(element, "title").text = username + ": " + cht['text'][:255]
+        etree.SubElement(element, "abstract").text = cht['text']
+
+        tags_node = etree.SubElement(element, "tags")
+
+        for tag in cht['tags']:
+            etree.SubElement(tags_node,"tag").text = tag
+
+        meta_element = etree.SubElement(element, 'meta')
+
+        etree.SubElement(meta_element, "polemic_version").text = options.protocol_version
+        parse_polemics = protocol_version_map.get(options.protocol_version, parse_polemics_2)
+        polemics_list = parse_polemics(cht['text'], options.extended_mode)
+        if polemics_list:
+            polemics_element = etree.Element('polemics')
+            for pol in polemics_list:
+                etree.SubElement(polemics_element, 'polemic').text = pol
+            meta_element.append(polemics_element)
+
+        etree.SubElement(meta_element, "source", attrib={"url":"http://zoom.io", "mimetype":"text/plain"}).text = etree.CDATA(json.dumps({'chat': chat_line}))
+
+    # sort elements by begin timecode
+    if options.merge:
+        # take all elements out, sort them by begin timecode and put them back
+        elements[:] = sorted(elements, key=lambda n: int(n.get('begin')))
+
+    #add to display node
+    if display_content_node is not None:
+        display_dec = None
+        for dec in display_content_node.iterchildren(tag="decoupage"):
+            if dec.get('idens','') == ensemble_id and dec.get('id', '') == decoupage_id:
+                display_dec = dec
+                break
+        if display_dec is None and ensemble_id and decoupage_id:
+            etree.SubElement(display_content_node, "decoupage", attrib={'idens': ensemble_id, 'id': decoupage_id, 'tagsSelect':''})
+
+    output_data = etree.tostring(root, encoding="utf-8", method="xml", pretty_print=False, xml_declaration=True).decode('utf-8')
+
+    if content_file_write and content_file_write.find("http") == 0:
+
+        project["ldt"] = output_data
+        project['owner'] = project['owner'].replace('%7E','~')
+        project['contents'] = [c_url.replace('%7E','~') for c_url in project['contents']]
+
+        post_param = {}
+        if options.post_param:
+            post_param = json.loads(options.post_param)
+
+        get_logger().debug("write http " + content_file_write) #@UndefinedVariable
+        get_logger().debug("write http " + repr(post_param)) #@UndefinedVariable
+        get_logger().debug("write http " + repr(project)) #@UndefinedVariable
+        r = requests.put(content_file_write, data=json.dumps(project), headers={'content-type':'application/json'}, params=post_param)
+        get_logger().debug("write http " + repr(r) + " content " + r.text) #@UndefinedVariable
+        if r.status_code != requests.codes.ok:  # pylint: disable=E1101
+            r.raise_for_status()
+    else:
+        if content_file_write and os.path.exists(content_file_write):
+            dest_file_name = content_file_write
+        else:
+            dest_file_name = options.filename
+
+        get_logger().debug("WRITE : " + dest_file_name) #@UndefinedVariable
+        with open(dest_file_name, "w", encoding="utf-8") as output:
+            output.write(output_data)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/script/utils/parse_timecode.py	Wed Sep 29 11:03:46 2021 +0200
@@ -0,0 +1,25 @@
+import csv
+import re
+import math
+
+TC_RE = re.compile(r"(\d{2}) (\d{2}) (\d{2}) ?, ?(\d{2})")
+
+def convert_tc(tc_str):
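+    """
+    Convert an "HH MM SS, cc" timecode (cc = hundredths of a second) to an
+    integer number of milliseconds; unparseable timecodes yield math.inf.
+    """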
+    m = TC_RE.match(tc_str)
+    if not m:
+        return math.inf
+    return int(m.group(1))*3600000 + int(m.group(2))*60000 + int(m.group(3)) * 1000 + int(m.group(4)) * 10
+
+if __name__ == "__main__":
+    with open('timecodes_webinaire_fcpe.csv') as csvfilein, \
+        open('timecodes_webinaire_fcpe_out.csv', 'w') as csvfileout:
+        chap_reader = csv.DictReader(csvfilein, delimiter=';')
+        chap_writer = csv.DictWriter(csvfileout, fieldnames=chap_reader.fieldnames, delimiter=';')
+
+        chap_writer.writeheader()
+        for row in chap_reader:
+            if not row['START']:
+                continue
+            row['START'] = convert_tc(row['START'])
+            row['END'] = convert_tc(row['END'])
+            chap_writer.writerow(row)
\ No newline at end of file