# TODO: correct getting duration from content; try to improve error management.
#
# See LICENCE for detail
# Copyright (c) 2014 IRI
#
import bisect
import datetime
import json
import logging
import os
import re
import uuid
from dateutil.parser import parse as parse_date_raw
from dateutil.tz import tzutc
from lxml import etree
import requests
logger = logging.getLogger(__name__)
PIANOROLL_CHANNEL = 'PIANOROLL'
ANNOTATION_CHANNEL = 'ANNOT'
class AnnotationsSynchronizer(object):
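    """Synchronize annotations from the live annotation API into an LDT/IRI project.

    Annotations are fetched from ``annot_url``, filtered (date range, channels,
    events, user whitelist), converted into ``ensemble``/``decoupage``/``element``
    nodes and written back to the platform or to a local file.

    Minimal usage sketch (the values below are illustrative, not real endpoints):

        sync = AnnotationsSynchronizer(
            project_id="<project-uuid>",
            annot_url="http://example.org/api/annotations",
            start_date="2014-01-01T10:00:00Z",
        )
        sync.export_annotations()
    """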
LDT_CONTENT_REST_API_PATH = "api/ldt/1.0/contents/"
LDT_PROJECT_REST_API_PATH = "api/ldt/1.0/projects/"
DEFAULT_ANNOTATION_CHANNEL = 'ANNOT'
def parse_date(self, datestr):
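        """Parse a date string; naive results are assumed to be UTC."""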
res = parse_date_raw(datestr)
if res.tzinfo is None:
res = res.replace(tzinfo=tzutc())
return res
def find_delta(self, ts):
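        """Return the (timestamp, offset) delta in effect at ``ts`` (milliseconds), or (0, 0) when none applies."""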
if self.deltas:
i = bisect.bisect_right(self.deltas, (ts+1,0))
if i:
return self.deltas[i-1]
return (0,0)
def get_filter(self):
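        """Build the filter list (date range, event codes, channels, user whitelist) sent to the annotation API."""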
res = []
if self.start_date:
res.append({'name': 'ts', 'op': ">=", 'val':self.start_date.isoformat() })
if self.end_date:
res.append({'name': 'ts', 'op': "<=", 'val':self.end_date.isoformat() })
if self.events:
res.append({'name': 'event_code', 'op': "in", 'val': self.events })
if self.channels:
res.append({'name': 'channel', 'op': "in", 'val': self.channels })
if self.user_whitelist:
res.append({'name': 'user', 'op': "in", 'val': self.user_whitelist })
return res
def build_annotation_iterator(self, params):
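        """Yield annotations from the annotation API, following server-side pagination."""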
page = 0
page_nb = 1
while page < page_nb:
page += 1
params['page'] = page
resp = requests.get(self.annot_url, params=params, headers={'Content-Type': 'application/json'})
if resp.status_code != requests.codes.ok:
self.logger.debug("build_annotation_iterator : request %s : return code %r " % (resp.request.url, resp.status_code))
return
resp_json = resp.json()
page_nb = resp_json.get('total_pages', 1)
for item in resp_json.get('objects', []):
#TODO: add progress log
yield item
def __init__(self, start_date=None, end_date=None, duration=None, content_file=None,
content_file_write=None, project_id=None,
channels=[DEFAULT_ANNOTATION_CHANNEL], events=[], annot_url=None,
user_whitelist=None, post_param={}, deltas=None, base_url="http://ldt.iri.centrepompidou.fr/ldtplatform/",
content=None, content_id=None, video=None,
replace=True, merge=False, name="", batch_size=500,
filename="project.ldt", color="16763904", logger=logger):
self.logger = logger
self.base_url = base_url.rstrip("/")+"/" if base_url else base_url
self.deltas = deltas
self.post_param = {}
if isinstance(post_param, basestring):
self.post_param = json.loads(post_param)
elif post_param:
self.post_param = post_param
self.start_date = None
if start_date:
if isinstance(start_date, datetime.datetime):
self.start_date = start_date
else:
self.start_date = self.parse_date(str(start_date))
self.duration = duration
self.end_date = None
if end_date:
if isinstance(end_date, datetime.datetime):
self.end_date = end_date
else:
                self.end_date = self.parse_date(str(end_date))
elif self.start_date and self.duration:
self.end_date = self.start_date + datetime.timedelta(seconds=self.duration)
self.content_file = content_file
self.project_id = project_id
if self.project_id is not None:
self.content_file = self.base_url + AnnotationsSynchronizer.LDT_PROJECT_REST_API_PATH + self.project_id + "/?format=json"
self.content_file_write = content_file_write
if self.content_file_write is None and self.project_id:
self.content_file_write = self.content_file
self.channels = list(set(channels))
self.annot_url = annot_url
self.events = list(set(events))
self.user_whitelist_file = user_whitelist
if self.user_whitelist_file:
            with open(self.user_whitelist_file, 'r') as f:
self.user_whitelist = list(set([s.strip() for s in f]))
else:
self.user_whitelist = None
self.content = content
self.content_id = content_id
self.video = video
self.replace = replace
self.merge = merge
self.name = name
self.batch_size = batch_size
self.filename = filename
self.color = color
def export_annotations(self):
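        """Fetch the filtered annotations and inject them into the project/content document.

        The target document is loaded from the platform (project_id / content_file URL),
        from a local file, or created from scratch; the resulting XML is then PUT back
        to the platform or written to a local file.
        """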
root = None
ensemble_parent = None
        display_content_node = None
        # TODO: analyse the situation: ldt or iri file? filename set or not?
        if self.content_file and self.content_file.startswith("http://"):
self.logger.debug("url : " + self.content_file) #@UndefinedVariable
r = requests.get(self.content_file, params=self.post_param)
self.logger.debug("url response " + repr(r) + " content " + repr(r.text)) #@UndefinedVariable
project = r.json()
text_match = re.match(r"\<\?\s*xml.*?\?\>(.*)", project['ldt'], re.I|re.S)
root = etree.fromstring(text_match.group(1) if text_match else project['ldt'])
elif self.content_file and os.path.exists(self.content_file):
doc = etree.parse(self.content_file)
root = doc.getroot()
content_id = None
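        # no existing document was loaded: build a minimal iri document from scratch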
if root is None:
root = etree.Element(u"iri")
project = etree.SubElement(root, u"project", {u"abstract":u"Annotations",u"title":u"Annotations", u"user":u"IRI Web", u"id":unicode(uuid.uuid4())})
medias = etree.SubElement(root, u"medias")
media = etree.SubElement(medias, u"media", {u"pict":u"", u"src":unicode(self.content), u"video":unicode(self.video), u"id":unicode(self.content_id), u"extra":u""})
annotations = etree.SubElement(root, u"annotations")
content = etree.SubElement(annotations, u"content", {u"id":unicode(self.content_id)})
ensemble_parent = content
content_id = self.content_id
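        # an existing document was loaded: detect its layout (ldt project or iri file)
        # and locate the node that will receive the annotation ensembles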
if ensemble_parent is None:
file_type = None
for node in root:
if node.tag == "project":
file_type = "ldt"
break
elif node.tag == "head":
file_type = "iri"
break
if file_type == "ldt":
            media_nodes = root.xpath("//media")
            if len(media_nodes) == 0:
                self.logger.error("No media node found - Can not process file") #@UndefinedVariable
                raise Exception("No media node found - can not process file")
            media = media_nodes[0]
annotations_node = root.find(u"annotations")
if annotations_node is None:
annotations_node = etree.SubElement(root, u"annotations")
content_node = annotations_node.find(u"content")
if content_node is None:
content_node = etree.SubElement(annotations_node,u"content", id=media.get(u"id"))
ensemble_parent = content_node
content_id = content_node.get(u"id")
display_nodes = root.xpath("//displays/display/content[@id='%s']" % content_id)
if len(display_nodes) == 0:
self.logger.info("No display node found. Will not update display")
display_content_node = None
else:
display_content_node = display_nodes[0]
elif file_type == "iri":
body_node = root.find(u"body")
if body_node is None:
body_node = etree.SubElement(root, u"body")
ensembles_node = body_node.find(u"ensembles")
if ensembles_node is None:
ensembles_node = etree.SubElement(body_node, u"ensembles")
ensemble_parent = ensembles_node
content_id = root.xpath("head/meta[@name='id']/@content")[0]
display_content_node = None
if ensemble_parent is None:
self.logger.error("Ensemble parent is None - Can not process file") #@UndefinedVariable
raise Exception("Ensemble parent is None - can not process file")
if self.replace:
            for ens in list(ensemble_parent.iterchildren(tag=u"ensemble")):
ens_id = ens.get("id","")
if ens_id.startswith("annot_"):
ensemble_parent.remove(ens)
# remove in display nodes
if display_content_node is not None:
                        for cut_display in list(display_content_node.iterchildren()):
if cut_display.get('idens','') == ens_id:
display_content_node.remove(cut_display)
ensemble = None
elements = None
if self.merge:
for ens in ensemble_parent.findall(u"ensemble"):
if ens.get('id',"").startswith("annot_"):
ensemble = ens
break
if ensemble is not None:
elements = ensemble.find(u".//elements")
decoupage = ensemble.find(u"decoupage")
if ensemble is None or elements is None:
ensemble = etree.SubElement(ensemble_parent, u"ensemble", {u"id":u"annot_" + unicode(uuid.uuid4()), u"title":u"Ensemble Annotation", u"author":u"IRI Web", u"abstract":u"Ensemble Annotation"})
decoupage = etree.SubElement(ensemble, u"decoupage", {u"id": unicode(uuid.uuid4()), u"author": u"IRI Web"})
etree.SubElement(decoupage, u"title").text = unicode(self.name)
etree.SubElement(decoupage, u"abstract").text = unicode(self.name)
elements = etree.SubElement(decoupage, u"elements")
ensemble_id = ensemble.get('id', '')
decoupage_id = decoupage.get('id', '') if decoupage is not None else None
if self.end_date is None and self.start_date and self.base_url:
# get duration from api
content_url = self.base_url + AnnotationsSynchronizer.LDT_CONTENT_REST_API_PATH + content_id + "/?format=json"
self.logger.debug("get duration " + content_url) #@UndefinedVariable
r = requests.get(content_url, params=self.post_param)
self.logger.debug("get duration resp " + repr(r)) #@UndefinedVariable
            if r.status_code == requests.codes.ok:
                content_duration = r.json().get('duration')
                if content_duration is None:
                    self.logger.error("No duration found in content response for %s" % content_url)
                    raise ValueError("No duration returned for content %s" % content_id)
                self.duration = int(content_duration)
            else:
                self.logger.error("Can not get duration from content : %r " % r)
                r.raise_for_status()
self.logger.debug("get duration " + repr(self.duration)) #@UndefinedVariable
            # the content duration is expected in milliseconds, hence the division by 1000
            self.end_date = self.start_date + datetime.timedelta(seconds=int(self.duration/1000))
if self.end_date and self.deltas:
self.end_date = self.end_date + datetime.timedelta(milliseconds=self.deltas[-1][1])
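        # build the annotation API query: filters plus pagination size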
filters = self.get_filter()
params = { 'q':json.dumps({'filters':filters}), 'results_per_page': self.batch_size}
for annot in self.build_annotation_iterator(params):
annot_ts = self.parse_date(annot['ts'])
if self.start_date is None:
                self.start_date = annot_ts
annot_ts_rel = annot_ts-self.start_date
annot_ts_rel_milli = int(round(annot_ts_rel.total_seconds()*1000))
d = self.find_delta(annot_ts_rel_milli)
if d[1] < 0:
continue
else :
annot_ts_rel_milli -= d[1]
annot_content = annot.get('content',{'category':'', 'user':None})
username = annot_content.get('user', 'anon.') or 'anon.'
category = annot_content.get('category', None)
            if not category:
continue
            element = etree.SubElement(elements, u"element", {u"id":unicode(annot.get('uuid', uuid.uuid4())), u"color":unicode(self.color), u"author":unicode(username), u"date":unicode(annot_ts.strftime("%Y/%m/%d")), u"begin": unicode(annot_ts_rel_milli), u"dur":u"0"})
etree.SubElement(element, u"title").text = unicode(username) + u": " + unicode(category.get('label', category.get('code', '')))
etree.SubElement(element, u"abstract").text = unicode(category.get('label', category.get('code', '')))
tags_node = etree.SubElement(element, u"tags")
etree.SubElement(tags_node,u"tag").text = category.get('code', '')
meta_element = etree.SubElement(element, u'meta')
polemics_element = etree.Element(u'polemics')
etree.SubElement(polemics_element, u'polemic').text = category.get('code', '')
meta_element.append(polemics_element)
etree.SubElement(meta_element, u"source", attrib={"url":self.annot_url + "/" + annot['uuid'], "mimetype":u"application/json"}).text = etree.CDATA(json.dumps(annot))
        # sort elements by begin timecode
        if self.merge:
            # pull all elements out, sort them by begin timecode and put them back
            elements[:] = sorted(elements, key=lambda n: int(n.get('begin')))
#add to display node
if display_content_node is not None:
display_dec = None
for dec in display_content_node.iterchildren(tag=u"decoupage"):
if dec.get('idens','') == ensemble_id and dec.get('id', '') == decoupage_id:
display_dec = dec
break
if display_dec is None and ensemble_id and decoupage_id:
etree.SubElement(display_content_node, u"decoupage", attrib={'idens': ensemble_id, 'id': decoupage_id, 'tagsSelect':''})
output_data = etree.tostring(root, encoding="utf-8", method="xml", pretty_print=False, xml_declaration=True)
        if self.content_file_write and self.content_file_write.startswith("http://"):
project["ldt"] = output_data
project['owner'] = project['owner'].replace('%7E','~')
project['contents'] = [c_url.replace('%7E','~') for c_url in project['contents']]
self.logger.debug("write http " + self.content_file_write) #@UndefinedVariable
self.logger.debug("write http " + repr(self.post_param)) #@UndefinedVariable
self.logger.debug("write http " + repr(project)) #@UndefinedVariable
            r = requests.put(self.content_file_write, data=json.dumps(project), headers={'content-type':'application/json'}, params=self.post_param)
self.logger.debug("write http " + repr(r) + " content " + r.text) #@UndefinedVariable
if r.status_code != requests.codes.ok: # @UndefinedVariable
r.raise_for_status()
else:
if self.content_file_write and os.path.exists(self.content_file_write):
dest_file_name = self.content_file_write
else:
dest_file_name = self.filename
self.logger.debug("WRITE : " + dest_file_name) #@UndefinedVariable
            with open(dest_file_name, "w") as output:
                output.write(output_data)