--- a/src/ldt/ldt/api/ldt/resources/content.py Wed Feb 06 14:33:29 2013 +0100
+++ b/src/ldt/ldt/api/ldt/resources/content.py Thu Feb 07 13:41:49 2013 +0100
@@ -26,7 +26,7 @@
class Meta:
allowed_methods = ['get']
resource_name = 'contents'
- queryset = Content.objects.select_related('front_project').all()
+ queryset = Content.objects.select_related('front_project', 'media_obj').all()
filtering = {
'tags' : ALL_WITH_RELATIONS,
'title' : ALL,
@@ -34,7 +34,7 @@
ordering = ['title', 'creation_date', 'content_creation_date']
def get_object_list(self, request):
- return Content.safe_objects.all()
+ return Content.safe_objects.select_related('front_project', 'media_obj').all()
def override_urls(self):
# WARNING : in tastypie <= 1.0, override_urls is used instead of prepend_urls. From 1.0.0, prepend_urls will be preferred and override_urls deprecated
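
Note: the two hunks above extend select_related() so the media_obj foreign key is joined in the same SQL query as front_project, both for the Meta queryset and for the per-request get_object_list(). A minimal sketch of the effect, assuming Content has nullable ForeignKeys named front_project and media_obj as the select_related() calls imply:

    # Hypothetical illustration, not part of the patch.
    # Without select_related(), dereferencing a FK issues one extra SELECT per row (N+1):
    for c in Content.objects.all():
        media_id = c.media_obj.id if c.media_obj else None   # one query per content

    # With select_related(), both FKs are joined into the single initial query:
    for c in Content.objects.select_related('front_project', 'media_obj'):
        media_id = c.media_obj.id if c.media_obj else None   # no additional queries
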
--- a/src/ldt/ldt/api/ldt/resources/segment.py Wed Feb 06 14:33:29 2013 +0100
+++ b/src/ldt/ldt/api/ldt/resources/segment.py Thu Feb 07 13:41:49 2013 +0100
@@ -9,7 +9,9 @@
from tastypie.http import HttpNotFound
from tastypie.resources import ModelResource
from tastypie.utils import trailing_slash
+import logging
+logger = logging.getLogger(__name__)
class SegmentResource(ModelResource):
class Meta:
@@ -84,7 +86,7 @@
begin = int(begin)
end = int(end)
- content = Content.objects.filter(iri_id=iri_id)
+ content = Content.objects.filter(iri_id=iri_id).select_related('media_obj', 'stat_annotation')
if not content:
return HttpNotFound("Content does not exist or id is not correct.")
content = content[0]
@@ -93,7 +95,7 @@
Q(start_ts__gte=begin, start_ts__lte=end) | # segment starts between begin and end
Q(start_ts__gte=begin-F('duration'), start_ts__lte=end-F('duration')) |# segment ends between begin and end
Q(start_ts__lte=begin, start_ts__gte=end-F('duration')) # period [begin:end] is included in the segment
- )
+ ).select_related("project_obj")
a = SegmentSerializer(content, segments)
return self.create_response(request, a.serialize_to_cinelab())
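
Note: the three Q() clauses above encode "the segment overlaps the window [begin, end]", and select_related("project_obj") is what later lets SegmentSerializer read seg.project_obj without one Project query per segment (see the segmentserializer.py hunks below). A rough Python restatement of the overlap predicate, assuming start_ts and duration use the same time unit as begin and end:

    # Hypothetical restatement of the ORM filter, for readability only.
    def overlaps(seg, begin, end):
        seg_end = seg.start_ts + seg.duration
        return ((begin <= seg.start_ts <= end)                  # segment starts inside the window
                or (begin <= seg_end <= end)                    # segment ends inside the window
                or (seg.start_ts <= begin and seg_end >= end))  # segment covers the window
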
--- a/src/ldt/ldt/ldt_utils/projectserializer.py Wed Feb 06 14:33:29 2013 +0100
+++ b/src/ldt/ldt/ldt_utils/projectserializer.py Thu Feb 07 13:41:49 2013 +0100
@@ -1,17 +1,17 @@
+from datetime import datetime
from django.conf import settings
-from datetime import datetime
+from django.contrib.contenttypes.models import ContentType
from django.utils.datastructures import SortedDict
-from ldt.ldt_utils.models import Content
+from ldt.ldt_utils.models import Content, User, Project
+from ldt.ldt_utils.stat import get_string_from_buckets
from ldt.ldt_utils.utils import reduce_text_node
-from ldt.ldt_utils.models import User, Project
-from ldt.ldt_utils.stat import get_string_from_buckets
-from ldt.security.utils import use_forbidden_url
import logging
import lxml.etree
import uuid
DATE_FORMATS = ["%d/%m/%Y", "%Y-%m-%d"]
+logger = logging.getLogger(__name__)
"""
Serialize a project object to a cinelab compatible array
@@ -40,6 +40,7 @@
def __parse_views(self, display_node_list):
+ logger.debug("__parse_views start")
for display_node in display_node_list:
display_id = display_node.get(u"id", None)
if not display_id:
@@ -90,11 +91,13 @@
new_display['annotation_types'] = [new_display['annotation_types'][0]]
self.views_dict[display_id] = new_display
-
+ logger.debug("__parse_views done")
def __parse_ensemble(self, ensemble_node, content, cutting_only=None):
+
+ ensemble_id = ensemble_node.attrib[u"id"]
+ logger.debug("__parse_ensemble %s start" % ensemble_id)
- ensemble_id = ensemble_node.attrib[u"id"]
ensemble_author = ensemble_node.attrib[u"author"]
ensemble_title = ensemble_node.attrib[u"title"]
ensemble_description = ensemble_node.attrib[u"abstract"]
@@ -280,10 +283,13 @@
if not list_items:
new_list["items"] = None
self.lists_dict[ensemble_id] = new_list
+
+ logger.debug("__parse_ensemble %s done" % ensemble_id)
def __parse_ldt(self):
+ logger.debug("__parse_ldt start")
self.ldt_doc = lxml.etree.fromstring(self.project.ldt_encoded)
if self.from_display:
@@ -293,20 +299,35 @@
self.__parse_views(self.ldt_doc.xpath(xpath_str))
+ # getting all contents at once
+ contents_iri_id = list(
+ set(self.ldt_doc.xpath("/iri/medias/media/@id")) |
+ set(self.ldt_doc.xpath("/iri/annotations/content/@id")) |
+ (set(self.ldt_doc.xpath('/iri/annotations/content[ensemble/decoupage/@id=\'%s\']/@id' % self.first_cutting)) if self.first_cutting and self.first_cutting not in self.display_cuttings_list else set())
+ )
+
+ logger.debug("contents to fetch: %s", contents_iri_id)
+
+ contents = dict([ (c.iri_id, c) for c in Content.objects.filter(iri_id__in=contents_iri_id).select_related('media_obj', 'stat_annotation').prefetch_related("authors")])
+ m_cls = ContentType.objects.get(model='media')
+ m_cls = m_cls.model_class()
+ medias = dict([ (m.id, m) for m in m_cls.safe_objects.filter(id__in = [c.media_obj.id for c in contents.values() if c.media_obj])])
+
+
res = self.ldt_doc.xpath("/iri/medias/media")
for mediaNode in res:
iri_id = mediaNode.attrib[u"id"]
if self.from_display and iri_id not in self.display_contents_list:
continue
- content = Content.objects.get(iri_id=iri_id) #@UndefinedVariable
- self.__parse_content(content)
+ content = contents[iri_id]  # was: Content.objects.get(iri_id=iri_id)
+ self.__parse_content(content, medias)
res = self.ldt_doc.xpath("/iri/annotations/content")
for content_node in res:
content_id = content_node.attrib[u"id"]
if self.from_display and content_id not in self.display_contents_list:
continue
- content = Content.objects.get(iri_id=content_id) #@UndefinedVariable
+ content = contents[content_id]  # was: Content.objects.get(iri_id=content_id)
for ensemble_node in content_node:
if ensemble_node.tag != "ensemble" :
continue
@@ -320,7 +341,7 @@
ensemble_node = cutting_node.xpath('..')[0]
content_node = ensemble_node.xpath('..')[0]
iri_id = content_node.get("id")
- content = Content.objects.get(iri_id=iri_id)
+ content = contents[iri_id]  # was: Content.objects.get(iri_id=iri_id)
self.__parse_ensemble(ensemble_node, content, cutting_only=[cutting_node])
@@ -341,9 +362,11 @@
self.__parse_edits()
self.parsed = True
+ logger.debug("__parse_ldt done")
def __parse_edits(self):
+ logger.debug("__parse_edits start")
editings = self.ldt_doc.xpath("/iri/edits/editing")
if not editings:
return False
@@ -378,10 +401,11 @@
}
}
self.lists_dict[e_id] = new_list
-
+ logger.debug("__parse_edits done")
- def __parse_content(self, content):
+ def __parse_content(self, content, medias):
+ logger.debug("__parse_content %s start" % content.iri_id)
doc = lxml.etree.parse(content.iri_file_path())
authors = content.authors.all()
@@ -412,7 +436,7 @@
url = ""
meta_item_value = ""
- if use_forbidden_url(content):
+ if content.media_obj and content.media_obj.id not in medias:
url = settings.FORBIDDEN_STREAM_URL
elif content.videopath:
url = content.videopath.rstrip('/') + "/" + content.src
@@ -459,15 +483,20 @@
res = doc.xpath("/iri/body/ensembles/ensemble")
for ensemble_node in res:
self.__parse_ensemble(ensemble_node, content)
+
+ logger.debug("__parse_content %s done" % content.iri_id)
def serialize_to_cinelab(self):
res = {}
-
+
+ logger.debug("serialize_to_cinelab before parse ldt")
+
if not self.parsed:
self.__parse_ldt()
+ logger.debug("serialize_to_cinelab parse ldt done")
project_main_media = ""
if len(self.medias_dict) > 0:
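
Note: the projectserializer hunks replace the per-node Content.objects.get() calls with one batched fetch: every iri_id referenced by the project XML is collected first, the matching Content rows (with media_obj, stat_annotation and authors) are loaded in a single query, and the loops then do plain dict lookups; media visibility is resolved the same way, once, through safe_objects. A condensed sketch of the pattern (Content, doc and Media stand in for the model class, the parsed XML and the ContentType-resolved media model used above):

    # Hypothetical sketch, not part of the patch.
    iri_ids = (set(doc.xpath("/iri/medias/media/@id")) |
               set(doc.xpath("/iri/annotations/content/@id")))
    contents = dict((c.iri_id, c)
                    for c in Content.objects.filter(iri_id__in=iri_ids)
                                            .select_related("media_obj", "stat_annotation")
                                            .prefetch_related("authors"))
    visible_media_ids = set(Media.safe_objects.filter(
        id__in=[c.media_obj.id for c in contents.values() if c.media_obj]
    ).values_list("id", flat=True))
    # contents[iri_id] lookups and "media_obj.id in visible_media_ids" then replace
    # one database query per XML node.
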
--- a/src/ldt/ldt/ldt_utils/searchutils.py Wed Feb 06 14:33:29 2013 +0100
+++ b/src/ldt/ldt/ldt_utils/searchutils.py Thu Feb 07 13:41:49 2013 +0100
@@ -3,7 +3,9 @@
from ldt.ldt_utils.utils import LdtUtils
from ldt.security.utils import set_forbidden_stream
import base64
+import logging
+logger = logging.getLogger(__name__)
def search_generate_ldt(request, field, query, query_encoded=True):
@@ -28,13 +30,21 @@
id_list = ids.keys()
projId_list = projIds.keys()
typesId_list = typesIds.keys()
-
- contentList = Content.objects.filter(iri_id__in=id_list) #@UndefinedVariable
- projectList = Project.safe_objects.filter(ldt_id__in=projId_list)
+
+ logger.debug("search_generate_ldt : getting content list")
+ contentList = Content.objects.filter(iri_id__in=id_list).select_related("front_project", "media_obj") #@UndefinedVariable
+ logger.debug("search_generate_ldt : getting project list")
+ projectList = Project.safe_objects.filter(ldt_id__in=projId_list)
+
+
ldtgen = LdtUtils()
# generate_ldt(contentList, title=u"", author=u"IRI Web", web_url=u"", startSegment=None, projects=None):
+ logger.debug("search_generate_ldt : generate ldt")
doc = ldtgen.generate_ldt(contentList, title=u"Recherche : " + queryStr, projects=projectList, types_id_list=typesId_list)
+
+ logger.debug("search_generate_ldt : set forbidden streams")
doc = set_forbidden_stream(doc, request.user)
+ logger.debug("search_generate_ldt : done")
return doc, results
\ No newline at end of file
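
Note: the timing instrumentation added across this patch uses module-level loggers. Where a message interpolates a value, logging's deferred formatting keeps the string work out of the hot path when DEBUG output is disabled. A small illustration (the messages are hypothetical, not from the patch):

    # Lazy form: the string is formatted only if a handler actually emits the record.
    logger.debug("search_generate_ldt : %d contents matched", len(id_list))
    # Eager form: the string is formatted on every call, even when DEBUG is off.
    logger.debug("search_generate_ldt : %d contents matched" % len(id_list))
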
--- a/src/ldt/ldt/ldt_utils/segmentserializer.py Wed Feb 06 14:33:29 2013 +0100
+++ b/src/ldt/ldt/ldt_utils/segmentserializer.py Thu Feb 07 13:41:49 2013 +0100
@@ -3,9 +3,12 @@
from ldt.ldt_utils.stat import get_string_from_buckets
from ldt.security.utils import use_forbidden_url
from tagging.utils import parse_tag_input
+import logging
import lxml.etree
import uuid
+logger = logging.getLogger(__name__)
+
DATE_FORMATS = ["%d/%m/%Y", "%Y-%m-%d"]
class SegmentSerializer(object):
@@ -30,13 +33,11 @@
self.xml_docs = {}
- def __get_cutting_title(self, project_id, content_id, ensemble_id, cutting_id):
+ def __get_cutting_title(self, project_id, content_id, ensemble_id, cutting_id, project):
if not self.xml_docs.has_key(project_id):
- project = Project.objects.filter(ldt_id=project_id)
if not project:
return None
- project = project[0]
doc = lxml.etree.fromstring(project.ldt_encoded)
self.xml_docs[project_id] = doc
else:
@@ -74,7 +75,7 @@
annotation_types = []
for seg in self.segments:
- title = self.__get_cutting_title(seg.project_id, seg.iri_id, seg.ensemble_id, seg.cutting_id)
+ title = self.__get_cutting_title(seg.project_id, seg.iri_id, seg.ensemble_id, seg.cutting_id, seg.project_obj)
annotation_types.append({'id': seg.cutting_id, 'title': title})
for a in annotation_types:
@@ -98,7 +99,8 @@
url = ""
meta_item_value = ""
-
+
+ logger.debug("__parse_content start")
if use_forbidden_url(self.content):
url = settings.FORBIDDEN_STREAM_URL
elif self.content.videopath:
@@ -107,6 +109,8 @@
else:
url = self.content.src
+ logger.debug("__parse_content url %s " % url)
+
media = {
"http://advene.liris.cnrs.fr/ns/frame_of_reference/ms" : "o=0",
"id" : self.content.iri_id,
@@ -192,12 +196,16 @@
def serialize_to_cinelab(self):
+ logger.debug("serialize_to_cinelab start")
if not self.segments:
return None
self.__parse_content()
+ logger.debug("serialize_to_cinelab parse content done")
self.__parse_segments()
+ logger.debug("serialize_to_cinelab parse segments done")
self.__parse_views()
+ logger.debug("serialize_to_cinelab parse views done")
res = {}
res['views'] = self.views
@@ -207,5 +215,7 @@
res['annotations'] = self.annotations
res['annotation-types'] = self.annotation_types
+ logger.debug("serialize_to_cinelab done")
+
return res
\ No newline at end of file
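
Note: the __get_cutting_title() change removes the per-call Project.objects.filter() lookup: the caller now passes seg.project_obj, which the segment queryset already joined via select_related("project_obj"), while the parsed ldt document stays cached per project in self.xml_docs. A reduced sketch of the resulting flow (the helper name __get_doc is hypothetical; the attributes are those used above):

    # Hypothetical sketch, assuming project is the prefetched seg.project_obj.
    def __get_doc(self, project_id, project):
        if project_id not in self.xml_docs:
            if not project:                        # segment whose project could not be joined
                return None
            self.xml_docs[project_id] = lxml.etree.fromstring(project.ldt_encoded)
        return self.xml_docs[project_id]
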
--- a/src/ldt/ldt/ldt_utils/views/json.py Wed Feb 06 14:33:29 2013 +0100
+++ b/src/ldt/ldt/ldt_utils/views/json.py Thu Feb 07 13:41:49 2013 +0100
@@ -14,6 +14,8 @@
import lxml.etree
import logging
+logger = logging.getLogger(__name__)
+
def project_json_id(request, id):
@@ -96,7 +98,6 @@
# do we remove annotations from mashup if they have duration=0? (yes by default)
remove_zero_dur_str = request.REQUEST.get("removezeroduration")
remove_zero_dur = True
- logging.debug("CC 1 " + str(remove_zero_dur))
if remove_zero_dur_str:
remove_zero_dur = {'true': True, 'false': False, "0": False, "1": True}.get(remove_zero_dur_str.lower())
@@ -106,14 +107,23 @@
if s:
# We get the projects with all the segments
project_xml, results = search_generate_ldt(request, "tags", s, False)
+
+ logger.debug("mashup_by_tag : search_generate_ldt done")
+
project = Project()
project.ldt = lxml.etree.tostring(project_xml, pretty_print=True)
# Needed datas for jsonification
now = datetime.now()
project.modification_date = project.creation_date = now
#return HttpResponse(lxml.etree.tostring(project_xml, pretty_print=True), mimetype="text/xml;charset=utf-8")
+ logger.debug("mashup_by_tag : serialize_to_cinelab prepare")
+
ps = ProjectJsonSerializer(project, from_contents=False)
+ logger.debug("mashup_by_tag : serialize_to_cinelab serializer ready")
mashup_dict = ps.serialize_to_cinelab()
+
+ logger.debug("mashup_by_tag : serialize_to_cinelab done")
+
# Now we build the mashup with the good segments (the ones between in and out)
if results:
tc_in = 0
@@ -204,8 +214,6 @@
json_str = "%s(%s)" % (callback, json_str)
resp = HttpResponse(mimetype="application/json; charset=utf-8")
- resp['Cache-Control'] = 'no-cache, must-revalidate'
- resp['Pragma'] = 'no-cache'
resp.write(json_str)
return resp
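
Note: this module now logs through a logger named after the module (__name__) instead of the root logging module, so the new debug lines here and in the serializers can be switched on per module from Django's LOGGING setting. A minimal hypothetical fragment (handler and level are illustrative):

    # Hypothetical dictConfig fragment enabling the new debug output for this module only.
    LOGGING = {
        'version': 1,
        'disable_existing_loggers': False,
        'handlers': {'console': {'class': 'logging.StreamHandler'}},
        'loggers': {
            'ldt.ldt_utils.views.json': {'handlers': ['console'], 'level': 'DEBUG'},
        },
    }
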
--- a/src/ldt/ldt/security/utils.py Wed Feb 06 14:33:29 2013 +0100
+++ b/src/ldt/ldt/security/utils.py Thu Feb 07 13:41:49 2013 +0100
@@ -5,6 +5,9 @@
from cache import get_cached_userlist, cached_assign
from ldt.security import change_security
import types
+import logging
+
+logger = logging.getLogger(__name__)
def unprotect_instance(instance):
if hasattr(instance, 'old_save'):
@@ -27,24 +30,28 @@
cls = ContentType.objects.get(model='content')
cls = cls.model_class()
+ m_cls = ContentType.objects.get(model='media')
+ m_cls = m_cls.model_class()
+
+ content_ids = xml.xpath('/iri/medias/media/@id')
+ contents = dict( [(c.iri_id, c) for c in cls.safe_objects.filter(iri_id__in=content_ids).select_related("media_obj")])
+
+ medias = dict([(m.id, m) for m in m_cls.safe_objects.filter(id__in=[c.media_obj.id for c in contents.values() if c.media_obj])])
+
for elem in xml.xpath('/iri/medias/media'):
- content = cls.safe_objects.filter(iri_id=elem.get('id'))
- if content and use_forbidden_url(content[0]) :
+ content = contents.get(elem.get('id'), None)
+ if content and content.media_obj and content.media_obj.id not in medias:
elem.set('video', settings.FORBIDDEN_STREAM_URL)
return xml
def use_forbidden_url(content):
- c_cls = ContentType.objects.get(model='content')
- c_cls = c_cls.model_class()
m_cls = ContentType.objects.get(model='media')
m_cls = m_cls.model_class()
- new_content = c_cls.safe_objects.filter(iri_id=content.iri_id)
- if new_content:
- if new_content[0].media_obj:
- media = m_cls.safe_objects.filter(id=new_content[0].media_obj.id)
- if not media:
- return True
+ if content.media_obj:
+ media = m_cls.safe_objects.filter(id=content.media_obj.id)
+ if not media:
+ return True
return False
def add_change_attr(user, obj_list):
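
Note: use_forbidden_url() above now trusts the Content instance it is handed instead of re-fetching it through safe_objects, and set_forbidden_stream() resolves all referenced contents and their medias in two queries before the loop instead of two queries per <media> element. A condensed sketch of the bulk check, using an id set rather than the medias dict (cls and m_cls are the ContentType-resolved content and media models, as in the function above):

    # Hypothetical sketch, not part of the patch.
    content_ids = xml.xpath('/iri/medias/media/@id')
    contents = dict((c.iri_id, c)
                    for c in cls.safe_objects.filter(iri_id__in=content_ids)
                                             .select_related("media_obj"))
    visible_ids = set(m_cls.safe_objects.filter(
        id__in=[c.media_obj.id for c in contents.values() if c.media_obj]
    ).values_list("id", flat=True))
    for elem in xml.xpath('/iri/medias/media'):
        c = contents.get(elem.get('id'))
        if c and c.media_obj and c.media_obj.id not in visible_ids:
            elem.set('video', settings.FORBIDDEN_STREAM_URL)
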