# HG changeset patch
# User ymh
# Date 1264738411 -3600
# Node ID 146d86480e85d0c541da9a7730230d708a8453f9
# Parent db6ed4430381a13798b2b1f9023247fee4ba3f36
add query for details

diff -r db6ed4430381 -r 146d86480e85 web/blinkster/__init__.py
--- a/web/blinkster/__init__.py	Thu Jan 28 23:12:32 2010 +0100
+++ b/web/blinkster/__init__.py	Fri Jan 29 05:13:31 2010 +0100
@@ -1,3 +1,3 @@
-VERSION = (0,5)
+VERSION = (0,6)
 VERSION_STR = unicode(".".join(map(lambda i:"%02d" % (i,), VERSION)))
diff -r db6ed4430381 -r 146d86480e85 web/blinkster/ldt/contentindexer.py
--- a/web/blinkster/ldt/contentindexer.py	Thu Jan 28 23:12:32 2010 +0100
+++ b/web/blinkster/ldt/contentindexer.py	Fri Jan 29 05:13:31 2010 +0100
@@ -6,7 +6,7 @@
 import blinkster.utils.log
 import blinkster.utils.xml
 from blinkster import settings
-from blinkster.ldt.models import Content
+from blinkster.ldt.models import Content, Segment
 import xml
 import xml.dom
 import xml.dom.minidom
@@ -76,6 +76,10 @@
             doc = lucene.Document()
             elementId = elementNode.getAttribute("id")
             tags = elementNode.getAttribute("tags")
+            author = elementNode.getAttribute("author")
+            start_ts = int(elementNode.getAttribute("begin"))
+            duration = int(elementNode.getAttribute("dur"))
+            date_str = elementNode.getAttribute("date")
 
             if tags is not None:
                 tags.replace(",", ";")
@@ -109,6 +113,20 @@
             doc.add(lucene.Field("abstract", abstract, lucene.Field.Store.NO, lucene.Field.Index.TOKENIZED))
             doc.add(lucene.Field("all", " ".join([tags, title, abstract]), lucene.Field.Store.NO, lucene.Field.Index.TOKENIZED))
 
+            seg = Segment(content = content,
+                          iri_id = content.iri_id,
+                          ensemble_id = ensembleId,
+                          cutting_id = decoupId,
+                          element_id = elementId,
+                          tags = tags,
+                          title = title,
+                          abstract = abstract,
+                          duration = duration,
+                          author = author,
+                          start_ts = start_ts,
+                          date = date_str)
+            seg.save()
+
             self.__writer.addDocument(doc)
 
         self.__writer.flush()
@@ -168,6 +186,10 @@
             doc = lucene.Document()
             elementId = elementNode.getAttribute("id")
             tags = elementNode.getAttribute("tags")
+            author = elementNode.getAttribute("author")
+            start_ts = int(elementNode.getAttribute("begin"))
+            duration = int(elementNode.getAttribute("dur"))
+            date_str = elementNode.getAttribute("date")
 
             if tags is not None:
                 tags.replace(",", ";")
@@ -192,6 +214,7 @@
             for txtRes in xml.xpath.Evaluate("abstract/text()", elementNode):
                 abstract = abstract + txtRes.data
 
+            doc.add(lucene.Field("project_id", project.iri_id, lucene.Field.Store.YES, lucene.Field.Index.UN_TOKENIZED))
             doc.add(lucene.Field("iri_id", contentId, lucene.Field.Store.YES, lucene.Field.Index.UN_TOKENIZED))
             doc.add(lucene.Field("ensemble_id", ensembleId, lucene.Field.Store.YES, lucene.Field.Index.NO))
@@ -202,6 +225,28 @@
             doc.add(lucene.Field("abstract", abstract, lucene.Field.Store.NO, lucene.Field.Index.TOKENIZED))
             doc.add(lucene.Field("all", " ".join([tags, title, abstract]), lucene.Field.Store.NO, lucene.Field.Index.TOKENIZED))
 
+            try:
+                content = Content.objects.get(iri_id = contentId)
+                seg = Segment( project_obj = project,
+                               content = content,
+                               project_id = project.ldt_id,
+                               iri_id = contentId,
+                               ensemble_id = ensembleId,
+                               cutting_id = decoupId,
+                               element_id = elementId,
+                               tags = tags,
+                               title = title,
+                               abstract = abstract,
+                               duration = duration,
+                               author = author,
+                               start_ts = start_ts,
+                               date = date_str)
+                seg.save()
+            except:
+                blinkster.utils.log.error("unable to store segment")
+
+
             self.__writer.addDocument(doc)
 
         self.__writer.flush()
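The contentindexer hunks above read four extra attributes (author, begin, dur, date) off each annotation element before building the Lucene document and the new Segment row. A minimal, self-contained sketch of that extraction, using a made-up element fragment (only the attribute names come from the patch; note that getAttribute() returns an empty string for a missing attribute, so the int() calls would raise ValueError in that case):

import xml.dom.minidom

# Hypothetical annotation fragment, for illustration only.
SAMPLE = '<element id="e1" tags="cinema;paris" author="someone" begin="12000" dur="3500" date="2010-01-29"/>'

elementNode = xml.dom.minidom.parseString(SAMPLE).documentElement

elementId = elementNode.getAttribute("id")
author = elementNode.getAttribute("author")
start_ts = int(elementNode.getAttribute("begin"))   # ValueError if begin is missing or empty
duration = int(elementNode.getAttribute("dur"))
date_str = elementNode.getAttribute("date")

print elementId, author, start_ts, duration, date_str   # e1 someone 12000 3500 2010-01-29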
diff -r db6ed4430381 -r 146d86480e85 web/blinkster/ldt/models.py
--- a/web/blinkster/ldt/models.py	Thu Jan 28 23:12:32 2010 +0100
+++ b/web/blinkster/ldt/models.py	Fri Jan 29 05:13:31 2010 +0100
@@ -48,6 +48,7 @@
     def __unicode__(self):
         return str(self.id) + ": " + self.iri_id
 
+
 class LdtProject(models.Model):
@@ -60,9 +61,33 @@
     def __unicode__(self):
         return unicode(self.id) + u": " + unicode(self.ldt_id)
 
+
+class Segment(models.Model):
+
+    project_obj = models.ForeignKey(LdtProject, null=True)
+    content = models.ForeignKey(Content)
+    project_id = models.CharField(max_length=1024, unique=False, blank = True, null=True)
+    iri_id = models.CharField(max_length=1024, unique=False)
+    ensemble_id = models.CharField(max_length=1024, unique=False)
+    cutting_id = models.CharField(max_length=1024, unique=False)
+    element_id = models.CharField(max_length=1024, unique=False)
+    tags = models.CharField(max_length=2048, unique=False, null=True, blank=True)
+    title = models.CharField(max_length=2048, unique=False, null=True, blank=True)
+    duration = models.IntegerField(null=True)
+    start_ts = models.IntegerField(null=True)
+    author = models.CharField(max_length=1024, unique=False, null=True, blank=True)
+    date = models.CharField(max_length=128, unique=False, null=True, blank=True)
+    abstract = models.TextField(null=True, blank=True)
+
+    def __unicode__(self):
+        return "/".join((unicode(self.project_id), unicode(self.iri_id), unicode(self.ensemble_id), unicode(self.cutting_id), unicode(self.element_id)))
+
+    class Meta:
+        unique_together = (('project_id','iri_id','ensemble_id','cutting_id','element_id'),)
 
 admin.site.register(Content)
 admin.site.register(Author)
 admin.site.register(LdtProject)
+admin.site.register(Segment)
diff -r db6ed4430381 -r 146d86480e85 web/blinkster/ldt/utils.py
--- a/web/blinkster/ldt/utils.py	Thu Jan 28 23:12:32 2010 +0100
+++ b/web/blinkster/ldt/utils.py	Fri Jan 29 05:13:31 2010 +0100
@@ -17,14 +17,13 @@
 class LdtSearch(object):
 
     def query(self, field, query):
+        res = []
         indexSearcher = lucene.IndexSearcher(STORE)
         #queryParser = lucene.MultiFieldQueryParser(["tags","title","abstract"], ANALYZER)
         queryParser = lucene.QueryParser(field, lucene.FrenchAnalyzer())
         queryParser.setDefaultOperator(lucene.QueryParser.Operator.AND)
         queryObj = queryParser.parse(query)
         hits = indexSearcher.search(queryObj)
-
-        res = []
         for hit in hits:
             doc = lucene.Hit.cast_(hit).getDocument()
             res.append({"iri_id":doc.get("iri_id"),"ensemble_id":doc.get("ensemble_id"),"decoupage_id":doc.get("decoupage_id"), "element_id":doc.get("element_id")})
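The Segment model above stores one row per indexed annotation, keyed by the same identifiers the Lucene index carries (the index field decoupage_id corresponds to cutting_id on the model), and Meta.unique_together makes that combination a natural key. A small lookup sketch, assuming the PyLucene environment is initialised and the index has already been built; the tag values are placeholders:

from blinkster.ldt.models import Segment
from blinkster.ldt.utils import LdtSearch

# Hypothetical tag search; LdtSearch.query() returns dicts of stored index fields.
for hit in LdtSearch().query("tags", u'"cinema" OR "paris"'):
    segments = Segment.objects.filter(iri_id=hit["iri_id"],
                                      ensemble_id=hit["ensemble_id"],
                                      cutting_id=hit["decoupage_id"],   # index name vs. model field name
                                      element_id=hit["element_id"])
    for seg in segments:
        print seg.title, seg.start_ts, seg.duration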
diff -r db6ed4430381 -r 146d86480e85 web/blinkster/templates/base.html
--- a/web/blinkster/templates/base.html	Thu Jan 28 23:12:32 2010 +0100
+++ b/web/blinkster/templates/base.html	Fri Jan 29 05:13:31 2010 +0100
@@ -3,7 +3,7 @@
     "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-    {% block title %}Pocket Films 2009{% endblock %}
+    {% block title %}Blinkster{% endblock %}
     {% block js_import %} {% endblock %}
     {% block css_import %} {% endblock %}
@@ -32,7 +32,7 @@
 Logo IRI
 {{VERSION}}
-©2009 IRI / Centre Pompidou
+©2010 IRI / Centre Pompidou
 {% endblock %}
diff -r db6ed4430381 -r 146d86480e85 web/blinkster/templates/segment_detail.html
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/web/blinkster/templates/segment_detail.html	Fri Jan 29 05:13:31 2010 +0100
@@ -0,0 +1,33 @@
+{% extends "base.html" %}
+
+{% block content %}
+
+Segment :
+
+Titre :
+{{segment.title}}
+
+Description :
+{{segment.abstract}}
+
+Tags :
+{{segment.tags}}
+
+Détails :
+
+{% endblock %}
\ No newline at end of file
diff -r db6ed4430381 -r 146d86480e85 web/blinkster/urls.py
--- a/web/blinkster/urls.py	Thu Jan 28 23:12:32 2010 +0100
+++ b/web/blinkster/urls.py	Fri Jan 29 05:13:31 2010 +0100
@@ -40,6 +40,9 @@
     #(r'^.*(?P<content>flvplayer|mp3player|ClearExternalAllBlue)\.swf$','django.views.generic.simple.redirect_to', {'url':blinkster.settings.BASE_URL+'/static/swf/ldt/pkg/%(content)s.swf'}),
     (r'^roi/list/$', 'blinkster.views.roi_list'),
     (r'^poi/list/(?P<roi_sid>.*)/$', 'blinkster.views.poi_list'),
+    (r'^segment/detail/(?P<iri_id>.*)/(?P<ensemble_id>.*)/(?P<cutting_id>.*)/(?P<element_id>.*)/$', 'blinkster.views.segment_detail'),
+    (r'^segment/search/$','blinkster.views.segment_search_post'),
+    (r'^segment/search/(?P<field>tags|title|abstract|all)/(?P<operator>or|and)/(?P<query>.*)/$','blinkster.views.segment_search_get'),
     (r'^photologue/', include('photologue.urls')),
 )
diff -r db6ed4430381 -r 146d86480e85 web/blinkster/utils/context_processors.py
--- a/web/blinkster/utils/context_processors.py	Thu Jan 28 23:12:32 2010 +0100
+++ b/web/blinkster/utils/context_processors.py	Fri Jan 29 05:13:31 2010 +0100
@@ -1,11 +1,11 @@
-import blinkster.settings;
+from django.conf import settings;
 import blinkster;
 
 def version(request):
     return {'VERSION': blinkster.VERSION_STR }
 
 def base(request):
-    return {'BASE_URL': blinkster.settings.BASE_URL }
+    return {'BASE_URL': settings.BASE_URL, 'MEDIA_URL': settings.MEDIA_URL }
 
 def web(request):
-    return {'WEB_URL': enmi.settings.WEB_URL }
+    return {'WEB_URL': settings.WEB_URL }
diff -r db6ed4430381 -r 146d86480e85 web/blinkster/views.py
--- a/web/blinkster/views.py	Thu Jan 28 23:12:32 2010 +0100
+++ b/web/blinkster/views.py	Fri Jan 29 05:13:31 2010 +0100
@@ -7,15 +7,23 @@
 from django.core import serializers
 from django.core.serializers.json import DjangoJSONEncoder
 from django.utils import simplejson
+from django.conf import settings
+from blinkster.ldt.models import Segment
+from blinkster.ldt.utils import LdtSearch
 from blinkster.models import Roi, Poi
 import blinkster
+import re
+import urllib2
 
-def serialize_queryset_to_json(response, obj_list):
+def serialize_queryset_to_json(response, obj_list, extra=None):
     objs = {
         "version" : blinkster.VERSION,
         "list" : obj_list,
     }
+    if extra:
+        objs = dict(objs, **extra)
+
     simplejson.dump(objs, response, cls=DjangoJSONEncoder,ensure_ascii=False, indent=4)
     return response
 
@@ -24,10 +32,61 @@
     content_type = request.GET.get("content-type", "application/json; charset=utf-8")
     response = HttpResponse(content_type=str(content_type))
-    return serialize_queryset_to_json(response, [roi.serialize_to_dict() for roi in Roi.objects.all()])
+    results = []
+    for roi_dict in [roi.serialize_to_dict() for roi in Roi.objects.all()]:
+        roi_dict["poi_list_url"] = unicode(settings.WEB_URL.rstrip('/') + reverse('blinkster.views.poi_list',args=[roi_dict["sid"]]))
+        results.append(roi_dict)
+
+    return serialize_queryset_to_json(response, results)
 
 def poi_list(request, roi_sid):
     content_type = request.GET.get("content-type", "application/json; charset=utf-8")
     response = HttpResponse(content_type=str(content_type))
     roi = get_object_or_404(Roi, sid=roi_sid)
-    return serialize_queryset_to_json(response, [poi.serialize_to_dict() for poi in Poi.objects.filter(roi=roi)])
+    results = []
+    for poi_dict in [poi.serialize_to_dict() for poi in Poi.objects.filter(roi=roi)]:
+        poi_dict["segment_search_url"] = unicode(settings.WEB_URL.rstrip('/') + reverse('blinkster.views.segment_search_get',args=[u"tags",u"or",urllib2.quote(poi_dict["tags"].encode("utf-8"))]))
+        results.append(poi_dict)
+    return serialize_queryset_to_json(response, results)
+
+# Display segment detail.
+# This will have to be moved to ldt module
+def segment_detail(request, iri_id, ensemble_id, cutting_id, element_id, project_id = None):
+
+    segment = get_object_or_404(Segment, project_id=project_id, iri_id=iri_id, ensemble_id=ensemble_id, cutting_id=cutting_id, element_id=element_id)
+    return render_to_response("segment_detail.html",{'segment':segment}, context_instance=RequestContext(request))
+
+def segment_search_post(request):
+    query = request.POST["query"]
+    field = request.POST["field"]
+    operator = request.POST["operator"]
+    return segment_search_get(request, query, field, operator)
+
+def segment_search_get(request, query, field = u"tags", operator=u"or"):
+
+    content_type = request.GET.get("content-type", "application/json; charset=utf-8")
+
+    response = HttpResponse(content_type=str(content_type))
+
+    # transform tag1, tag2, tag3 in "tag1" OR "tag2" OR "tag3"
+    query_str = (u" " + operator.upper() + u" ").join(["\"" + t.strip() + "\"" for t in re.split("\,|\;",query)])
+
+    searcher = blinkster.ldt.utils.LdtSearch()
+    index_results = searcher.query(field, query_str)
+
+    results = []
+
+    for index_res in index_results:
+        res = dict(index_res)
+        segs = Segment.objects.filter(iri_id=index_res["iri_id"], element_id=index_res["element_id"], cutting_id=index_res["decoupage_id"], ensemble_id=index_res["ensemble_id"]).values()
+        for seg in segs:
+            del seg["project_obj_id"]
+            del seg["content_id"]
+            del seg["id"]
+            res = dict(res, **seg)
+            res["segment_detail_url"] = unicode(settings.WEB_URL.rstrip('/') + reverse('blinkster.views.segment_detail',args=[res["iri_id"], res["ensemble_id"], res["cutting_id"], res["element_id"]]))
+            results.append(res)
+
+    return serialize_queryset_to_json(response, results, {u"query" : query, u"field": field, u"operator":operator, u"final_query": query_str})
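The urls.py and views.py changes expose the search both as a POST endpoint and as a GET endpoint whose path carries the field, the boolean operator and a comma- or semicolon-separated list of terms, which segment_search_get rewrites into a quoted Lucene query. A rough client-side sketch, assuming a local development server (host and port are placeholders; json can be replaced by simplejson on older Python versions):

# -*- coding: utf-8 -*-
import re
import json      # or django.utils.simplejson on older setups
import urllib2

def build_query(query, operator=u"or"):
    # Same transformation segment_search_get applies:
    # "tag1, tag2" -> '"tag1" OR "tag2"'
    terms = [u'"' + t.strip() + u'"' for t in re.split(r"\,|\;", query)]
    return (u" " + operator.upper() + u" ").join(terms)

assert build_query(u"cinema, paris") == u'"cinema" OR "paris"'

# Hypothetical GET call; the URL layout follows the pattern added in urls.py:
# /segment/search/<field>/<operator>/<query>/
tags = urllib2.quote(u"cinema, paris".encode("utf-8"))
url = "http://localhost:8000/segment/search/tags/or/%s/" % tags
data = json.loads(urllib2.urlopen(url).read())

for seg in data["list"]:
    print seg["title"], seg["segment_detail_url"]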