'''
Created on 2014-02-21

@author: tcavalie
'''

from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from django.db.models.aggregates import Count
from django.db.models.query import RawQuerySet
from django.http.response import HttpResponse
from django.shortcuts import get_object_or_404
from django.views.generic import TemplateView, View
from itertools import groupby
from ldt.indexation import get_results_with_context
from ldt.ldt_utils.models import Segment, Content, Project
from ldt.ldt_utils.projectserializer import ProjectJsonSerializer
from ldt.ldt_utils.views.json import project_json_id
from ldt.ldt_utils.views.workspace import get_search_results
from operator import itemgetter
from taggit.models import Tag, TaggedItem
import json
import re
import time
import uuid

import logging
logger = logging.getLogger(__name__)


class Home(TemplateView):
    template_name = "spel_home.html"

    def get(self, request):
        context = {}
        return self.render_to_response(context)


class Theatre(TemplateView):
    template_name = "spel_theatre.html"

    def get(self, request):
        # Get start and end for date bounds (earliest is available only on django 1.6)
        start_date = Content.objects.filter(tags__name__in=["content_theatre"]).order_by("content_creation_date")[0].content_creation_date
        end_date = Content.objects.filter(tags__name__in=["content_theatre"]).latest("content_creation_date").content_creation_date
        context = {"start_date": start_date, "end_date": end_date}
        return self.render_to_response(context)


class Opera(TemplateView):
    template_name = "spel_opera.html"

    def get(self, request):
        # Get start and end for date bounds (earliest is available only on django 1.6)
        start_date = Content.objects.filter(tags__name__in=["content_opera"]).order_by("content_creation_date")[0].content_creation_date
        end_date = Content.objects.filter(tags__name__in=["content_opera"]).latest("content_creation_date").content_creation_date
        context = {"start_date": start_date, "end_date": end_date}
        return self.render_to_response(context)


class ChapterRequest(TemplateView):

    template_name = "partial/spel_chapters.html"

    def get(self, request):
        # Filter content by date if necessary
        content_qs = Content.objects.filter(tags__name__in=["content_theatre"])
        start_date_param = request.GET.get("start_date", "")
        if start_date_param != "":
            content_qs = content_qs.filter(content_creation_date__gt=start_date_param)
        end_date_param = request.GET.get("end_date", "")
        if end_date_param != "":
            content_qs = content_qs.filter(content_creation_date__lt=end_date_param + " 23:59:59")
        iri_ids = content_qs.values_list("iri_id", flat=True)
        #logger.debug("iri_ids")
        #logger.debug(iri_ids)
        # Filter segment if necessary
        annot_types_param = request.GET.get("annotation_types", "")
        seg_queryset = Segment.objects.filter(iri_id__in=iri_ids).select_related("content__title")  # .prefetch_related("tags")
        annot_types = []
        if annot_types_param != "":
            annot_types = annot_types_param.split(",")
            seg_queryset = seg_queryset.filter(cutting_id__in=annot_types)

        # First we look at modalites_sceniques and personnages tags.
        mod_scen_param = request.GET.get("modalites_sceniques", "")
        mod_scen = []
        if mod_scen_param != "":
            mod_scen = mod_scen_param.split(",")
        perso_param = request.GET.get("personnages", "")
        perso = []
        if perso_param != "":
            perso = perso_param.split(",")

        # Tags from start text and end text
        start_text = request.GET.get("start_text", "")
        end_text = request.GET.get("end_text", "")
        ref_text = None
        searched_ref_text = None
        if (start_text != "" and start_text != "start") or (end_text != "" and end_text != "end"):
            # First get all ref_text values
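            # ref_text tags are expected to look like "ref_text: <start>-<end>" (e.g. "ref_text: 12-34",
            # an illustrative value): name[10:] skips the 10-character "ref_text: " prefix so the regex
            # below can extract the two integer bounds used for sorting.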
            rt_tags = Tag.objects.filter(name__startswith="ref_text:")
            ref_text_int = []
            for rt in rt_tags:
                m = re.match(r"^(\d+)-(\d+)$", rt.name[10:])
                if m and len(m.groups()) == 2:
                    ref_text_int.append((int(m.group(1)), int(m.group(2))))
            ref_text_int.sort()
            all_ref_text = [(str(one) + "-" + str(two)) for (one, two) in ref_text_int]

            # At least start or end has been defined, so we search for the concerned chapter ids.
            start_index = 0
            if start_text != "start":
                try:
                    start_index = all_ref_text.index(start_text)
                except:
                    pass
            end_index = len(all_ref_text)
            if end_text != "end":
                try:
                    end_index = all_ref_text.index(end_text) + 1
                except:
                    pass
            searched_ref_text = all_ref_text[start_index:end_index]
            ref_text = Tag.objects.filter(name__in=[("ref_text: " + rt) for rt in searched_ref_text])

        # Get tags from orm
        all_tags = mod_scen + perso
        tags = Tag.objects.filter(name__in=all_tags)
        # seg_queryset.filter(tags__in=tags) doesn't work because taggit finds segments with one of the tags and not ALL tags,
        # so we make a correct request through TaggedItem first.
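        # The TaggedItem query below emulates an AND across tags: it counts, per object_id,
        # how many of the requested tags are attached and keeps only the ids whose count equals
        # the number of requested tags, i.e. the segments carrying every one of them.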
        # Ref text filter if possible
        if ref_text and len(ref_text) > 0:
            # A chapter can only have one ref_text, and the search on ref_text is an OR.
            # That's a lot of requests (with the orm) but it seems to be the only thing possible with tagging.
            s = []
            for rt in ref_text:
                current_tags = list(tags)
                current_tags.append(rt)
                #s += list(TaggedItem.objects.get_by_model(seg_queryset, current_tags))
                seg_ids = list(TaggedItem.objects
                               .values_list("object_id", flat=True)
                               .filter(content_type=ContentType.objects.get_for_model(Segment))
                               .filter(tag__in=current_tags)
                               .annotate(count_status=Count('object_id'))
                               .filter(count_status=len(current_tags)))
                s += list(seg_queryset.filter(pk__in=seg_ids))
        else:
            # Get segments from tagged items
            #s = TaggedItem.objects.get_by_model(seg_queryset, tags)
            tags = list(tags)
            seg_ids = list(TaggedItem.objects
                           .values_list("object_id", flat=True)
                           .filter(content_type=ContentType.objects.get_for_model(Segment))
                           .filter(tag__in=tags)
                           .annotate(count_status=Count('object_id'))
                           .filter(count_status=len(tags)))
            s = list(seg_queryset.filter(pk__in=seg_ids))

        context = {"annot_types": annot_types, "start_date": start_date_param, "end_date": end_date_param,
                   "mod_scen": mod_scen, "perso": perso, "searched_ref_text": searched_ref_text, "segments": s}

        return self.render_to_response(context)

    def post(self, request):
        annotations_param = request.POST.get("annotations", "[]")
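        # "annotations" is assumed to be a JSON list of dicts of the form
        # [{"iri_id": "<content iri_id>", "data": "<timecode>"}, ...], already ordered by iri_id
        # (itertools.groupby only merges consecutive items); "data" is cast to int and later
        # compared against segment start_ts/duration.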
        grouped_annotations = []
        for iri_id, items in groupby(json.loads(annotations_param), itemgetter('iri_id')):
            # Get segments timecodes
            s = [int(i["data"]) for i in items]
            grouped_annotations.append({'content': iri_id, 'list': s})
        if len(grouped_annotations) == 0:
            return HttpResponse("")
        # Request segments: we build a complex query that seems impossible to express with the orm, even with Q, Sum and the like.
        # Here is a SQL example of what we want:
        # select ldt_utils_segment.id, ldt_utils_segment.cutting_id, ldt_utils_segment.tags, ldt_utils_segment.start_ts, ldt_utils_segment.duration, ldt_utils_content.title AS ct
        # from ldt_utils_segment
        # INNER JOIN ldt_utils_content ON (ldt_utils_segment.content_id = ldt_utils_content.id)
        # where cutting_id IN ('performance','discussion')
        # AND (
        #   ( ldt_utils_segment.iri_id='CONTENT_ID_1' AND (
        #     ( ldt_utils_segment.start_ts < TIMECODE_1 AND TIMECODE_1 < (ldt_utils_segment.start_ts + ldt_utils_segment.duration) )
        #     OR
        #     ( ldt_utils_segment.start_ts < TIMECODE_2 AND TIMECODE_2 < (ldt_utils_segment.start_ts + ldt_utils_segment.duration) )
        #   ))
        #   OR
        #   ( ldt_utils_segment.iri_id='CONTENT_ID_2' AND (
        #     ( ldt_utils_segment.start_ts < TIMECODE_3 AND TIMECODE_3 < (ldt_utils_segment.start_ts + ldt_utils_segment.duration) )
        #   ))
        # )

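        # The raw query below keeps only segments whose cutting_id is 'performance' or 'discussion'
        # (treated here as the chapter cuttings) and returns a chapter whenever one of the posted
        # timecodes falls strictly between its start_ts and start_ts + duration.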
        raw_query = "select ldt_utils_segment.id, ldt_utils_segment.cutting_id, ldt_utils_segment.start_ts, ldt_utils_segment.duration, ldt_utils_content.title AS ct \nfrom ldt_utils_segment \nINNER JOIN ldt_utils_content ON (ldt_utils_segment.content_id = ldt_utils_content.id) \nwhere cutting_id IN ('performance','discussion') \nAND ("
        for i, ga in enumerate(grouped_annotations):
            if i > 0:
                raw_query += "\n OR "
            raw_query += "\n ( ldt_utils_segment.iri_id='" + ga["content"] + "' AND ("
            for j, tc in enumerate(ga["list"]):
                if j > 0:
                    raw_query += "\n OR "
                raw_query += "\n ( ldt_utils_segment.start_ts < " + str(tc) + " AND " + str(tc) + " < (ldt_utils_segment.start_ts + ldt_utils_segment.duration) )"
            raw_query += "\n )) "
        raw_query += "\n) "

        context = {"annot_chapters": True, "start_date": "", "end_date": "", "annot_types": [], "mod_scen": [], "perso": [], "segments": list(Segment.objects.raw(raw_query))}
        return self.render_to_response(context)


class AnnotationRequest(TemplateView):

    template_name = "partial/spel_annotations.html"

    def get(self, request):
        q = request.GET.get("q", "")
        page = 1
        content_list = Content.objects.filter(tags__name__in=["content_theatre"])
        if q != "":
            field = "abstract"
            results, nb_contents, nb_segments = get_search_results(request, q, field, page, content_list)
            results = results.object_list
            type_inter_param = ""
        else:
            type_inter_param = request.GET.get("type_inter", "")
            seg_queryset = []
            results = []
            tagged_segs = []
            if type_inter_param != "":
                type_inter = [("type_inter: " + t) for t in type_inter_param.split(",")]
                tags = Tag.objects.filter(name__in=type_inter)
                # Get segments from tagged items
                #tagged_segs = TaggedItem.objects.get_by_model(Segment, tags).values()
                tags = list(tags)
                seg_ids = list(TaggedItem.objects
                               .values_list("object_id", flat=True)
                               .filter(content_type=ContentType.objects.get_for_model(Segment))
                               .filter(tag__in=tags)
                               .annotate(count_status=Count('object_id'))
                               .filter(count_status=len(tags)))
                tagged_segs = Segment.objects.filter(pk__in=seg_ids).prefetch_related("tags__name").values("pk", "tags__name", "project_id", "iri_id", "ensemble_id", "cutting_id", "element_id", "title", "duration", "start_ts", "author", "date", "abstract", "polemics", "id_hash", "audio_src", "audio_href")

                # Because of prefetch and values, we have to parse all items in order to create a list of tags for all items
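                # Each dict produced by values() is one (segment, tag name) pair, so a segment with
                # N tags appears N times; the loop below keeps the first row per pk and accumulates
                # the tag names into a single "tags" list.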
                tagged_segs_dict = {}
                for s in tagged_segs:
                    if s['pk'] not in tagged_segs_dict:
                        tagged_segs_dict[s['pk']] = s
                        tagged_segs_dict[s['pk']]["tags"] = []
                    tagged_segs_dict[s['pk']]["tags"].append(s['tags__name'])
                tagged_segs = tagged_segs_dict.values()

            all_contents = list(Content.objects.filter(iri_id__in=[s['iri_id'] for s in tagged_segs]))
            for iri_id, items in groupby(tagged_segs, itemgetter('iri_id')):
                # Get the matching content
                content = None
                content_filter = filter(lambda e: e.iri_id == iri_id, all_contents)
                if len(content_filter) > 0:
                    content = content_filter[0]
                if content is None:
                    continue
                # Get segments
                s = list(items)
                results.append({'content': content, 'list': s})
            nb_contents = len(results)
            nb_segments = len(tagged_segs)

        context = {"q": q, "searched_tags": type_inter_param.split(","), "results": results, "nb_contents": nb_contents, "nb_annotations": nb_segments}

        return self.render_to_response(context)

    def post(self, request):
        chapters_param = request.POST.get("chapters", "[]")
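        # "chapters" is assumed to be a JSON list of dicts of the form
        # [{"iri_id": "<content iri_id>", "start": "<timecode>", "end": "<timecode>"}, ...],
        # ordered by iri_id so that groupby() can bundle the time ranges per content.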
        grouped_chapters = []
        for iri_id, items in groupby(json.loads(chapters_param), itemgetter('iri_id')):
            # Get segments timecodes
            s = [{"start": int(i["start"]), "end": int(i["end"])} for i in items]
            grouped_chapters.append({'content': iri_id, 'list': s})
        if len(grouped_chapters) == 0:
            return HttpResponse("")

        # Request segments: we build a complex query that seems impossible to express with the orm, even with Q, Sum and the like.
        # Here is a SQL example of what we want:
        # select *
        # from ldt_utils_segment
        # where cutting_id NOT IN ('performance','discussion')
        # AND (
        #   ( ldt_utils_segment.iri_id='CONTENT_ID_1' AND (
        #     ( TIMECODE_START_1 < ldt_utils_segment.start_ts AND ldt_utils_segment.start_ts < TIMECODE_END_1 )
        #     OR
        #     ( TIMECODE_START_2 < ldt_utils_segment.start_ts AND ldt_utils_segment.start_ts < TIMECODE_END_2 )
        #   ))
        #   OR
        #   ( ldt_utils_segment.iri_id='CONTENT_ID_2' AND (
        #     ( TIMECODE_START_3 < ldt_utils_segment.start_ts AND ldt_utils_segment.start_ts < TIMECODE_END_3 )
        #   ))
        # )

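        # Mirror of ChapterRequest.post: the raw query keeps segments whose cutting_id is NOT
        # 'performance'/'discussion' and LEFT JOINs the taggit tables so every row also carries one
        # tag name (the duplicated rows are collapsed a few lines below).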
        raw_query = "SELECT ldt_utils_segment.id, taggit_tag.name AS tags__name, ldt_utils_segment.iri_id, ldt_utils_segment.cutting_id, ldt_utils_segment.element_id, ldt_utils_segment.title, ldt_utils_segment.duration, ldt_utils_segment.start_ts, ldt_utils_segment.abstract \nFROM ldt_utils_segment \nLEFT OUTER JOIN taggit_taggeditem \nON (ldt_utils_segment.id = taggit_taggeditem.object_id) \nLEFT OUTER JOIN taggit_tag \nON (taggit_taggeditem.tag_id = taggit_tag.id) \nwhere cutting_id NOT IN ('performance','discussion') \nAND ("
        #raw_query = "select id, iri_id, cutting_id, start_ts, duration, title, abstract \nfrom ldt_utils_segment \nwhere cutting_id NOT IN ('performance','discussion') \nAND ("
        for i, ga in enumerate(grouped_chapters):
            if i > 0:
                raw_query += "\n OR "
            raw_query += "\n ( ldt_utils_segment.iri_id='" + ga["content"] + "' AND ("
            for j, tc in enumerate(ga["list"]):
                if j > 0:
                    raw_query += "\n OR "
                raw_query += "\n ( " + str(tc["start"]) + " < ldt_utils_segment.start_ts AND ldt_utils_segment.start_ts < " + str(tc["end"]) + " )"
            raw_query += "\n )) "
        raw_query += "\n)"

        # Dicts because itemgetter is used for groupby below
        tagged_segs = [dict(s.__dict__) for s in Segment.objects.raw(raw_query)]
        # Because of the taggit_tag.name JOIN, we have to parse all items in order to create a list of tags for all items
        tagged_segs_dict = {}
        for s in tagged_segs:
            if s['id'] not in tagged_segs_dict:
                tagged_segs_dict[s['id']] = s
                tagged_segs_dict[s['id']]["tags"] = []
            tagged_segs_dict[s['id']]["tags"].append(s['tags__name'])
        tagged_segs = tagged_segs_dict.values()

        # Prefetch all contents
        all_contents = list(Content.objects.filter(iri_id__in=[s['iri_id'] for s in tagged_segs]))
        results = []
        for iri_id, items in groupby(tagged_segs, itemgetter('iri_id')):
            # Get the matching content
            content = None
            content_filter = filter(lambda e: e.iri_id == iri_id, all_contents)
            if len(content_filter) > 0:
                content = content_filter[0]
            if content is None:
                continue
            # Get segments
            s = list(items)
            results.append({'content': content, 'list': s})
        nb_contents = len(results)
        nb_segments = len(tagged_segs)

        context = {"chapter_annots": True, "q": "", "searched_tags": [], "results": results, "nb_contents": nb_contents, "nb_annotations": nb_segments}

        return self.render_to_response(context)


class OperaRequest(TemplateView):

    template_name = "partial/spel_opera_annotations.html"

    def get(self, request):
        # Filter content by date if necessary
        content_qs = Content.objects.filter(tags__name__in=["content_opera"])
        start_date_param = request.GET.get("start_date", "")
        if start_date_param != "":
            content_qs = content_qs.filter(content_creation_date__gt=start_date_param)
        end_date_param = request.GET.get("end_date", "")
        if end_date_param != "":
            content_qs = content_qs.filter(content_creation_date__lt=end_date_param + " 23:59:59")

        # Query on abstract management
        q = request.GET.get("q", "").strip()
        if q != "":
            # Query case: we get segments by the search engine
            field = "abstract"
            results = get_results_with_context(Segment, field, q, content_qs)
            seg_queryset = Segment.objects.filter(pk__in=[e['indexation_id'] for e in results]).select_related("content__title")
        else:
            # Filter segment if necessary
            iri_ids = content_qs.values_list("iri_id", flat=True)
            seg_queryset = Segment.objects.filter(iri_id__in=iri_ids).select_related("content__title")

        annot_types_param = request.GET.get("annotation_types", "")
        annot_types = []
        if annot_types_param != "":
            annot_types = annot_types_param.split(",")
            seg_queryset = seg_queryset.filter(cutting_id__in=annot_types)

        # First we look at modalites_sceniques and personnages tags.
        mod_scen_param = request.GET.get("modalites_sceniques", "")
        mod_scen = []
        if mod_scen_param != "":
            mod_scen = mod_scen_param.split(",")
        perso_param = request.GET.get("personnages", "")
        perso = []
        if perso_param != "":
            perso = perso_param.split(",")
        type_travail_param = request.GET.get("type_travail", "")
        type_travail = []
        if type_travail_param != "":
            type_travail = type_travail_param.split(",")
        acte_param = request.GET.get("acte", "")
        acte = []
        if acte_param != "":
            acte = acte_param.split(",")
        scene_param = request.GET.get("scene", "")
        scene = []
        if scene_param != "":
            scene = scene_param.split(",")

        # Mesure management
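        # start_mesure/end_mesure select a range of "opera_mesure: <n>" tags, "start" and "end"
        # acting as open bounds; name[14:] strips the 14-character "opera_mesure: " prefix so only
        # the integer part of each tag name is parsed.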
        start_mesure = request.GET.get("start_mesure", "")
        end_mesure = request.GET.get("end_mesure", "")
        mesure = None
        mesure_int = None
        if start_mesure == "start" and end_mesure == "":
            mesure = []
            mesure_int = []
        elif start_mesure != "start" and end_mesure == "":
            # Only one mesure has been defined
            mesure = [Tag.objects.get(name="opera_mesure: " + start_mesure)]
            mesure_int = [start_mesure]
        elif end_mesure != "":
            # First get all mesure values
            m_tags = Tag.objects.filter(name__startswith="opera_mesure:")
            mesure_int = []
            for m in m_tags:
                tested = re.match(r"^(\d+)$", m.name[14:])
                if tested and len(tested.groups()) == 1:
                    mesure_int.append(int(tested.group(1)))
            mesure_int.sort()

            # At least start or end has been defined, so we search for the concerned mesures.
            start_index = 0
            if start_mesure != "start":
                try:
                    start_index = mesure_int.index(int(start_mesure))
                except:
                    pass
            end_index = len(mesure_int)
            if end_mesure != "end":
                try:
                    end_index = mesure_int.index(int(end_mesure)) + 1
                except:
                    pass
            mesure_int = mesure_int[start_index:end_index]
            mesure = Tag.objects.filter(name__in=[("opera_mesure: " + str(m)) for m in mesure_int])

        # Get tags from orm
        all_tags = mod_scen + perso + type_travail + acte + scene
        tags = Tag.objects.filter(name__in=all_tags)

        # seg_queryset.filter(tags__in=tags) doesn't work because taggit finds segments with one of the tags and not ALL tags,
        # so we make a correct request through TaggedItem first.
        # Mesure filter if possible
        if mesure and len(mesure) > 0:
            # A segment can only have one mesure, and the search on mesure is an OR.
            # That's a lot of requests (with the orm) but it seems to be the only thing possible with tagging.
            s = []
            for m in mesure:
                current_tags = list(tags)
                current_tags.append(m)
                #s += list(TaggedItem.objects.get_by_model(seg_queryset, current_tags))
                seg_ids = list(TaggedItem.objects
                               .values_list("object_id", flat=True)
                               .filter(content_type=ContentType.objects.get_for_model(Segment))
                               .filter(tag__in=current_tags)
                               .annotate(count_status=Count('object_id'))
                               .filter(count_status=len(current_tags)))
                s += list(seg_queryset.filter(pk__in=seg_ids))
        else:
            # Get segments from tagged items
            #s = TaggedItem.objects.get_by_model(seg_queryset, tags)
            tags = list(tags)
            if len(tags) > 0:
                seg_ids = list(TaggedItem.objects
                               .values_list("object_id", flat=True)
                               .filter(content_type=ContentType.objects.get_for_model(Segment))
                               .filter(tag__in=tags)
                               .annotate(count_status=Count('object_id'))
                               .filter(count_status=len(tags)))
                s = list(seg_queryset.filter(pk__in=seg_ids))
            else:
                s = list(seg_queryset)

        context = {"annot_types": annot_types, "start_date": start_date_param, "end_date": end_date_param,
                   "mod_scen": mod_scen, "perso": perso, "type_travail": type_travail, "acte": acte,
                   "scene": scene, "mesure": mesure_int, "q": q, "segments": s}

        return self.render_to_response(context)


class JsonRdfExport(View):

    def get(self, request, id):
        """
        Export the project identified by `id` as a Cinelab package: as RDF/XML or Turtle
        (through libadvene) when `format` is "rdf" or "ttl", as Cinelab JSON otherwise.
        """
        format = request.GET.get("format", "")
        if format == "rdf" or format == "ttl":
            from libadvene.model.cam.package import Package
            from libadvene.model.parsers.cinelab_json import Parser
            from libadvene.model.serializers.cinelab_rdf import serialize_to as serialize_to_rdf
            from libadvene.model.serializers.cinelab_ttl import serialize_to as serialize_to_ttl

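            # Export flow: serialize the project to a Cinelab JSON dict, parse that dict into an
            # in-memory libadvene Package, then serialize the package straight into the HTTP
            # response as RDF/XML or Turtle.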
            project = get_object_or_404(Project, ldt_id=id)
            ps = ProjectJsonSerializer(project)
            project_dict = ps.serialize_to_cinelab()
            p = Package("http://spectacleenlignes.fr" + request.path, create=True)
            Parser.parse_into(project_dict, p)

            if format == "rdf":
                content_type = "application/rdf+xml"
                serialize_method = serialize_to_rdf
            elif format == "ttl":
                content_type = "text/turtle"
                serialize_method = serialize_to_ttl
            resp = HttpResponse(content_type=content_type)
            serialize_method(p, resp)
            return resp

        return project_json_id(request, id)