--- a/web/hdabo/views.py Mon Jul 11 17:20:30 2011 +0200
+++ b/web/hdabo/views.py Mon Jul 11 18:19:45 2011 +0200
@@ -12,7 +12,7 @@
from haystack.constants import DJANGO_ID
from haystack.query import SearchQuerySet
from hdabo.utils import OrderedDict, remove_accents, normalize
-from hdabo.wp_utils import (normalize_tag, query_wikipedia_title,
+from hdabo.wp_utils import (normalize_tag, query_wikipedia_title,
get_or_create_tag, process_tag, reorder_datasheet_tags)
from models import Datasheet, Organisation, Tag, TagCategory, TaggedSheet
from wikitools import wiki
@@ -26,19 +26,19 @@
orgas = Organisation.objects.all().order_by('name')
org_list = []
- all_ds_mapping = dict([(res['organisation'],res['nb_all']) for res in Datasheet.objects.values("organisation").annotate(nb_all=Count("organisation"))])
- validated_ds_mapping = dict([(res['organisation'],[res['nb_val'],res['first_id_val']]) for res in Datasheet.objects.filter(validated=True).values("organisation").annotate(nb_val=Count("organisation")).annotate(first_id_val=Min("id"))])
- unvalidated_ds_mapping = dict([(res['organisation'],[res['nb_unval'],res['first_id_unval']]) for res in Datasheet.objects.filter(validated=False).values("organisation").annotate(nb_unval=Count("organisation")).annotate(first_id_unval=Min("id"))])
+ all_ds_mapping = dict([(res['organisation'], res['nb_all']) for res in Datasheet.objects.values("organisation").annotate(nb_all=Count("organisation"))])
+ validated_ds_mapping = dict([(res['organisation'], [res['nb_val'], res['first_id_val']]) for res in Datasheet.objects.filter(validated=True).values("organisation").annotate(nb_val=Count("organisation")).annotate(first_id_val=Min("id"))])
+ unvalidated_ds_mapping = dict([(res['organisation'], [res['nb_unval'], res['first_id_unval']]) for res in Datasheet.objects.filter(validated=False).values("organisation").annotate(nb_unval=Count("organisation")).annotate(first_id_unval=Min("id"))])
# We get the hda_id from the id
- val_hda_ids = dict([(res.id,res.hda_id) for res in Datasheet.objects.filter(id__in=[v[1] for v in validated_ds_mapping.values()]).only("id","hda_id")])
- unval_hda_ids = dict([(res.id,res.hda_id) for res in Datasheet.objects.filter(id__in=[v[1] for v in unvalidated_ds_mapping.values()]).only("id","hda_id")])
+ val_hda_ids = dict([(res.id, res.hda_id) for res in Datasheet.objects.filter(id__in=[v[1] for v in validated_ds_mapping.values()]).only("id", "hda_id")])
+ unval_hda_ids = dict([(res.id, res.hda_id) for res in Datasheet.objects.filter(id__in=[v[1] for v in unvalidated_ds_mapping.values()]).only("id", "hda_id")])
for orga in orgas :
- nb_all = all_ds_mapping.get(orga.id,0)
- duo_val = validated_ds_mapping.get(orga.id,[0,None])
+ nb_all = all_ds_mapping.get(orga.id, 0)
+ duo_val = validated_ds_mapping.get(orga.id, [0, None])
nb_val = duo_val[0]
first_id_val = val_hda_ids.get(duo_val[1], None)
- duo_unval = unvalidated_ds_mapping.get(orga.id,[0,None])
+ duo_unval = unvalidated_ds_mapping.get(orga.id, [0, None])
nb_unval = duo_unval[0]
first_id_unval = unval_hda_ids.get(duo_unval[1], None)
org_list.append({'organisation':orga, 'nb_all':nb_all, 'nb_val':nb_val, 'first_id_val':first_id_val, 'nb_unval':nb_unval, 'first_id_unval':first_id_unval})
@@ -73,7 +73,7 @@
tag = None
if "tag" in request.GET :
tag = Tag.objects.get(id=int(request.GET["tag"]))
- datasheets_qs = Datasheet.objects.filter(tags__in=[tag]).order_by("organisation__name","original_creation_date").select_related("format")
+ datasheets_qs = Datasheet.objects.filter(tags__in=[tag]).order_by("organisation__name", "original_creation_date").select_related("format")
# If tag is set and if ds_id is None, it means that we have to display the first ds
if not ds_id :
index = 0
@@ -94,13 +94,13 @@
next_index = 1
next_id = select_qs[1].hda_id
elif index == (nb_sheets - 1) :
- select_qs = datasheets_qs[nb_sheets-2:]
+ select_qs = datasheets_qs[nb_sheets - 2:]
prev_index = nb_sheets - 2
prev_id = select_qs[0].hda_id
next_index = nb_sheets - 1
next_id = select_qs[1].hda_id
else :
- select_qs = datasheets_qs[index-1:index+2]
+ select_qs = datasheets_qs[index - 1:index + 2]
prev_index = index - 1
prev_id = select_qs[0].hda_id
next_index = index + 1
@@ -121,7 +121,7 @@
{'ds':ds, 'orga_name':ds.organisation.name,
'nb_sheets':nb_sheets, 'ordered_tags':ordered_tags,
'zero_id':zero_id, 'prev_index':prev_index, 'prev_id':prev_id,
- 'next_index':next_index, 'next_id':next_id,
+ 'next_index':next_index, 'next_id':next_id,
'last_index':last_index, 'last_id':last_id,
'displayed_index':displayed_index, 'tag':tag, 'valid':ds.validated,
'categories':json.dumps(get_categories())},
@@ -211,7 +211,7 @@
finally:
cursor.close()
- search_def = tuple([(c,urlquote(c + settings.SEARCH_STAR_CHARACTER)) for c in fl_list])
+ search_def = tuple([(c, urlquote(c + settings.SEARCH_STAR_CHARACTER)) for c in fl_list])
return render_to_response("all_tags.html",
{'nb_total':p.count, 'tags':current_page.object_list, 'current_page':current_page,
@@ -282,14 +282,14 @@
if searched and searched != "" :
searched = normalize(searched.strip())
- regex = "^%s$" % (re.escape(searched).replace(re.escape(settings.SEARCH_STAR_CHARACTER),".*"))
+ regex = "^%s$" % (re.escape(searched).replace(re.escape(settings.SEARCH_STAR_CHARACTER), ".*"))
base_queryset = base_queryset.filter(normalized_label__iregex=regex)
alltags = base_queryset.annotate(num_ds=Count('datasheet'))
- if alpha and (alpha=="true" or alpha=="1") :
+ if alpha and (alpha == "true" or alpha == "1") :
alltags = alltags.order_by('normalized_label', 'label')
else :
- alltags = alltags.order_by('-popularity','-num_ds', 'normalized_label', 'label')
+ alltags = alltags.order_by('-popularity', '-num_ds', 'normalized_label', 'label')
#alltags = alltags.order_by('-popularity','label')
# We build the paginator for the requested list
@@ -359,7 +359,10 @@
tag.wikipedia_pageid = pageid
tag.dbpedia_uri = dbpedia_uri
- tag.save()
+ try:
+ tag.save()
+ except Exception:
+ return HttpResponseBadRequest(json.dumps({'error': 'duplicate_tag', 'message': u"Le tag %s (%s) existe déjà." % (tag_label, tag.original_label)}), mimetype="application/json")
if old_pageid != pageid:
TaggedSheet.objects.filter(tag=tag).update(wikipedia_revision_id=revision_id)
@@ -418,7 +421,10 @@
tag.wikipedia_url = None
tag.wikipedia_pageid = None
- process_tag(site, tag)
+ try:
+ process_tag(site, tag)
+ except Exception:
+ return HttpResponseBadRequest(json.dumps({'error': 'duplicate_tag', 'message': u"La version sémantisée du tag %s (%s) existe déjà." % (tag.label, tag.original_label)}), mimetype="application/json")
@@ -471,6 +477,9 @@
TaggedSheet.objects.filter(tag=tag, datasheet__hda_id=request.POST["datasheet_id"]).update(tag=new_tag, wikipedia_revision_id=None)
else:
+
+ if Tag.objects.filter(label=tag.label, original_label=tag.original_label, url_status=Tag.TAG_URL_STATUS_DICT['null_result']).count() > 0:
+ return HttpResponseBadRequest(json.dumps({'error': 'duplicate_tag', 'message': u"La version désémantisée du tag %s (%s) existe déjà." % (tag.label, tag.original_label)}), mimetype="application/json")
tag.wikipedia_url = None
tag.wikipedia_pageid = None
tag.dbpedia_uri = None