RDF import correction + new version
author ymh <ymh.work@gmail.com>
date Sat, 26 Sep 2015 11:55:11 +0200
changeset 649 434737bd64e5
parent 648 206859005b33
child 650 9d5642e1b249
RDF import correction + new version
src/hdabo/management/commands/import_rdf.py
src/hdalab/__init__.py
src/hdalab/management/commands/query_wikipedia_category.py
src/hdalab/views/profile.py
--- a/src/hdabo/management/commands/import_rdf.py	Thu Sep 24 13:40:54 2015 +0200
+++ b/src/hdabo/management/commands/import_rdf.py	Sat Sep 26 11:55:11 2015 +0200
@@ -319,14 +319,14 @@
 
     def do_import_graph(self):
         pass
-    
+
     def _process_attr_node(self, attr_node):
         if attr_node is None:
             return None
 
         if attr_node.is_resource():
             return str(attr_node.uri)
-        
+
         if attr_node.is_blank():
             new_g = RDF.Model()
             qs = RDF.Statement(subject=attr_node, predicate=None, object=None)
@@ -352,7 +352,7 @@
             return self.attr_cache[name]
         if name not in self.attr_rdf_map:
             raise AttributeError("%s not in attributes" % name)
-        
+
         res = None
         if isinstance(self.attr_rdf_map[name], (list, tuple, set)):
             res = reduce(
@@ -406,10 +406,10 @@
         if cat.natural_key != val:
             cat.natural_key = val
             dirty = True
-        
+
         if dirty :
             cat.save()
-            
+
         return [(cat, created, dirty), ]
 
 
@@ -435,12 +435,12 @@
         })
 
     def do_import_graph(self):
-        
+
         if self.id == "___":
             return []
-        
+
         tag, created = Tag.objects.get_or_create(label=self.label, original_label=self.original_label, url_status=self.url_status, defaults={'natural_key': self.id})
-        
+
         dirty = False
         tag.force_natural_key = True
         tag.natural_key = self.id
@@ -488,7 +488,7 @@
         })
 
     def do_import_graph(self):
-        
+
         domain, created = Domain.objects.get_or_create(natural_key=self.id, defaults={'label': self.value, 'school_period': Domain.DOMAIN_PERIOD_DICT[u'Global']})
 
         dirty = False
@@ -498,7 +498,7 @@
         if domain.school_period != Domain.DOMAIN_PERIOD_DICT[u'Global']:
             domain.school_period = Domain.DOMAIN_PERIOD_DICT[u'Global']
             dirty = True
-        
+
         if dirty:
             domain.save()
 
@@ -534,7 +534,7 @@
         if domain.school_period != ThemeImporter.LEVEL_MAP[self.level]:
             domain.school_period = ThemeImporter.LEVEL_MAP[self.level]
             dirty = True
-        
+
         if dirty:
             domain.save()
 
@@ -568,7 +568,7 @@
         if period.school_period != ThemeImporter.LEVEL_MAP[self.level]:
             period.school_period = ThemeImporter.LEVEL_MAP[self.level]
             dirty = True
-        
+
         if dirty:
             period.save()
 
@@ -577,18 +577,18 @@
 
 
 class SiteImporter(RdfImporter):
-    
+
     def __init__(self, g):
         super(SiteImporter, self).__init__(g, 'Site')
         self.attr_rdf_map.update({
             'name': RDFS_NS.label,
             'website': FOAF_NS.homepage,
         })
-    
+
     def do_import_graph(self):
-        
+
         org, created = Organisation.objects.get_or_create(hda_id = self.id)
-        
+
         dirty = False
         if self.name != org.name:
             org.name = self.name
@@ -604,7 +604,7 @@
 
 
 class InstitutionImporter(RdfImporter):
-    
+
     def __init__(self, g):
         super(InstitutionImporter, self).__init__(g, 'Institution')
         self.attr_rdf_map.update({
@@ -612,11 +612,11 @@
             'website': FOAF_NS.homepage,
             'location': HDA_NS.ville
         })
-    
+
     def do_import_graph(self):
-        
+
         org, created = Organisation.objects.get_or_create(hda_id = self.id)
-        
+
         dirty = False
         if self.name != org.name:
             org.name = self.name
@@ -649,7 +649,7 @@
             'wikipedia_revision_id': DBPEDIA_NS.wikiPageRevisionID,
             'tag': HDA_NS.tag
         })
-    
+
     def do_import_graph(self):
         tag = Tag.objects.filter(natural_key=self.tag.split("/")[-1]).first()
         if tag is None:
@@ -659,9 +659,9 @@
         if ts is None:
             ts = TaggedSheet.objects.create(tag=tag, datasheet=self.datasheet)
             created = True
-        
+
         dirty = False
-        
+
         val = self.created_at
         if ts.created_at.replace(tzinfo=pytz.UTC) != val:
             dirty = True
@@ -677,7 +677,7 @@
 
         if dirty:
             ts.save()
-        
+
         return [(ts,created,dirty),]
 
 
@@ -688,7 +688,7 @@
         #print("NOTICE IMPORTER")
         #ser = RDF.Serializer(name='turtle')
         #print(ser.serialize_model_to_string(g))
-        
+
         self.attr_rdf_map.update({
             'title': DC_NS.title,
             'description': DC_NS.description,
@@ -710,11 +710,11 @@
 
     @transaction.atomic
     def do_import_graph(self):
-        
-        
+
+
         ds, created = Datasheet.objects.get_or_create(hda_id=self.id, defaults={'original_creation_date': self.original_creation_date, 'original_modification_date': self.original_modification_date})
         dirty=False
-        
+
         res = []
 
         for field in ['title', 'description', 'url']:
@@ -727,13 +727,13 @@
             if getattr(ds, field) != val:
                 setattr(ds, field, val)
                 dirty = True
-        
+
         org_url = self.organisation
         org = None
         if org_url:
             org_id = org_url.split("/")[-1]
             org = Organisation.objects.filter(hda_id=org_id).first()
-        
+
         if org != ds.organisation:
             ds.organisation = org
             dirty = True
@@ -746,7 +746,7 @@
         if town != ds.town:
             ds.town = town
             dirty = True
-        
+
         fmt = self.format
         format_obj = None
         if fmt:
@@ -756,15 +756,15 @@
         if format_obj != ds.format:
             ds.format = format_obj
             dirty = True
-            
+
         if not ds.validated:
             ds.validated = True
             dirty = True
-        
+
         for (field, ObjKlass) in [('domains',Domain), ('primary_periods', TimePeriod), ('college_periods', TimePeriod), ('highschool_periods', TimePeriod), ('primary_themes', Domain), ('college_themes', Domain), ('highschool_themes', Domain)]:
             tgt_obj_ids = [obj_url.split("/")[-1] for obj_url in getattr(self, field)]
             tgt_obj_ids.sort()
-            
+
             ds_objs_ids = [d.natural_key for d in getattr(ds, field).all()]
             ds_objs_ids.sort()
             if ds_objs_ids !=  tgt_obj_ids:
@@ -775,17 +775,17 @@
                     link_args = {'datasheet': ds, ObjKlass.__name__.lower(): tgt_obj, 'sort_value': i}
                     ThroughKlass.objects.create(**link_args)
                 dirty = True
-        
+
         if dirty:
             ds.save()
-        
+
         res.append((ds, created, dirty))
-        
+
         #create TaggedSheet
         for tagged_sheet_graph in self.tags:
             importer = DocumentTagImporter(tagged_sheet_graph, ds)
             importer.import_graph()
-        
+
         return res
 
 
@@ -806,12 +806,11 @@
     '''
     Command to import csvfile
     '''
-    args = '<path_to_csv_file path_to_csv_file ...>'
+    args = '<path_to_rdf_file>'
     options = '[--type TYPE]'
-    help = """Import of a csv file for hdabo
+    help = """Import of a rdf file for hdabo
 Options:
-    --ignore-existing : ignore existing datasheets
-    --lines : max number of lines to load (for each file). 0 means all.
+    --type TYPE : type of object to import
 """
 
     option_list = BaseCommand.option_list + (
@@ -923,5 +922,3 @@
         print("")
 
         shutil.rmtree(graph_disk_cache.temp_folder)
-
-
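
Note on import_rdf.py: the command is now invoked as "manage.py import_rdf [--type TYPE] path_to_rdf_file" (a single RDF file instead of a list of CSV files), and the hunks above all follow the same import idiom: fetch or create the target row, compare each incoming RDF attribute with the stored value, and save only when something actually changed. A minimal sketch of that pattern, distilled from the Domain hunks (import_domain and the module path are illustrative, not code from the file):

    from hdabo.models import Domain  # assumed module path

    def import_domain(key, label):
        # get_or_create + dirty-flag idiom shared by the do_import_graph methods
        obj, created = Domain.objects.get_or_create(
            natural_key=key, defaults={'label': label})
        dirty = False
        if obj.label != label:    # compare incoming RDF value with stored one
            obj.label = label
            dirty = True
        if dirty:                 # hit the database only when something changed
            obj.save()
        return [(obj, created, dirty)]  # (instance, created?, modified?) triples
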
--- a/src/hdalab/__init__.py	Thu Sep 24 13:40:54 2015 +0200
+++ b/src/hdalab/__init__.py	Sat Sep 26 11:55:11 2015 +0200
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 from __future__ import absolute_import
 
-VERSION = (3, 0, 2, "final", 0)
+VERSION = (3, 0, 3, "final", 0)
 
 
 def get_version():
@@ -18,5 +18,3 @@
 __version__ = get_version()
 
 default_app_config = 'hdalab.apps.HdalabAppConfig'
-
-
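
Note on hdalab/__init__.py: the only change is the version bump from 3.0.2 to 3.0.3; get_version() just below the tuple renders it into the string exposed as __version__. Its body sits outside this diff, so the following is an assumed sketch of the conventional rendering, not the file's actual code:

    VERSION = (3, 0, 3, "final", 0)

    def get_version():
        # "3.0.3" for a "final" release; pre-releases would append e.g. "a0"
        main = ".".join(str(part) for part in VERSION[:3])
        if VERSION[3] != "final":
            return main + VERSION[3][0] + str(VERSION[4])
        return main
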
--- a/src/hdalab/management/commands/query_wikipedia_category.py	Thu Sep 24 13:40:54 2015 +0200
+++ b/src/hdalab/management/commands/query_wikipedia_category.py	Sat Sep 26 11:55:11 2015 +0200
@@ -41,7 +41,7 @@
     '''
     options = ''
     help = """query and update wikipedia for tag title."""
-    
+
     option_list = NoArgsCommand.option_list + (
         make_option('--all',
             action='store_true',
@@ -96,116 +96,116 @@
             help='the tag to query'),
 
     )
-    
-    
-#    def process_wp_response(self, label, response):        
+
+
+#    def process_wp_response(self, label, response):
 #
 #        query_dict = response['query']
 #        # get page if multiple pages or none -> return Tag.null_result
 #        pages = query_dict.get("pages", {})
 #        if len(pages) > 1 or len(pages) == 0:
 #            return None, Tag.TAG_URL_STATUS_DICT["null_result"], None, None
-#        
+#
 #        page = pages.values()[0]
-#        
+#
 #        if u"invalid" in page or u"missing" in page:
 #            return None, Tag.TAG_URL_STATUS_DICT["null_result"], None, None
 #
 #        url = page.get(u'fullurl', None)
 #        pageid = page.get(u'pageid', None)
 #        new_label = page[u'title']
-#        
+#
 #        if self.__is_homonymie(page):
 #            status = Tag.TAG_URL_STATUS_DICT["homonyme"]
 #        elif u"redirect" in page:
 #            status = Tag.TAG_URL_STATUS_DICT["redirection"]
 #        else:
 #            status = Tag.TAG_URL_STATUS_DICT["match"]
-#        
-#        return new_label, status, url, pageid 
+#
+#        return new_label, status, url, pageid
 
     def query_all_categories(self, hidden, site, pageid, use_label):
-        
+
         clshow = 'hidden' if hidden else '!hidden'
         params = {'action':'query', 'titles' if use_label else 'pageids': pageid, 'prop':'categories', 'clshow': clshow}
-        
-        clcontinue = ""        
+
+        clcontinue = ""
         res = []
-        
+
         while clcontinue is not None:
             if clcontinue:
                 params['clcontinue'] = clcontinue
-                
+
             wpquery = api.APIRequest(site, params) #@UndefinedVariable
             response = wpquery.query()
-            
+
             if self.verbosity > 1:
                 print "Query infoboxes : " + repr(wpquery.request.get_full_url()+"?"+wpquery.request.get_data())
                 print repr(response)
 
-            
+
             query_dict = response.get('query', None)
-            
+
             if query_dict is None:
                 return res
-            
+
             pages = query_dict.get("pages", {})
             if len(pages) > 1 or len(pages) == 0:
                 return res
-            
+
             page = pages.values()[0]
-                        
+
             for cat in page.get('categories',[]):
                 title = cat.get('title',"")
                 title = title[title.find(":")+1:]
                 if title and clcontinue != ("%s|%s" % (pageid,title)):
                     res.append(title)
-            
+
             clcontinue = response.get('query-continue', {}).get('categories',{}).get('clcontinue', None)
 
         if self.verbosity > 1:
             print "Query infoboxes RES: "
             print repr(res)
-            
+
         return res
-    
+
     def process_categories(self, cat_list, hidden, tag):
-        
+
         for cat in cat_list:
             wp_cat,created = WpCategory.objects.get_or_create(label=cat) #@UnusedVariable
             TagWpCategory.objects.get_or_create(tag=tag, wp_category=wp_cat, hidden=hidden)
-            
-                
+
+
     def query_infoboxes(self, site, pageid, use_label):
-        
+
         res = []
         params = {'action':'query', 'titles' if use_label else 'pageids': pageid, 'prop':'revisions', 'rvprop': 'ids|content'}
         wpquery = api.APIRequest(site, params) #@UndefinedVariable
         response = wpquery.query()
-        
+
         query_dict = response.get('query', None)
-            
+
         if query_dict is None:
             return res
-            
+
         pages = query_dict.get("pages", {})
         if len(pages) > 1 or len(pages) == 0:
             return res
 
         page = pages.values()[0]
-        
+
         if 'revisions' not in page or not page['revisions']:
             return res
-        
+
         rev = page['revisions'][0]
-        
+
         content = rev['*']
-                
+
         start = 0
         depth = 0
         current_infobox_name = None
         current_start = 0
-        
+
         while start <= len(content):
             if depth==0:
                 resm = START_PATTERN.search(content[start:])
@@ -214,7 +214,7 @@
                 depth = 1
                 current_start = resm.start()+start
                 start += resm.end()+1
-                current_infobox_name = resm.group(1)                    
+                current_infobox_name = resm.group(1)
             else:
                 resm = END_PATTERN.search(content[start:])
                 if resm is None:
@@ -228,20 +228,20 @@
                 start += resm.end()+1
 
         return_val = (rev['revid'],res)
-        
+
         if self.verbosity > 1:
             print "Query infoboxes url: " + repr(wpquery.request.get_full_url()+"?"+wpquery.request.get_data())
             print repr(return_val)
-        
+
         return return_val
-    
+
     def split_infoboxes(self, src):
-        
+
         start = 0
         previous_end = 0
         split_indexes = []
         delimiter_stack = []
-        while start<=len(src):            
+        while start<=len(src):
             resd = DELIMITER_PATTERN.search(src[start:])
             ress = SPLIT_PATTERN.search(src[start:]) if len(delimiter_stack) == 0 else None
             startd = resd.start() if resd is not None else sys.maxint
@@ -260,7 +260,7 @@
                 start += resd.end()
             else:
                 break
-            
+
         if previous_end > 0:
             split_indexes.append((previous_end,len(src)))
         res = [src[start:end] for start,end in split_indexes]
@@ -269,14 +269,16 @@
 
 
     def process_infoboxes(self, infobox_defs, tag):
-        
+
         if not infobox_defs:
             return
-        
+
         revision_id = infobox_defs[0]
         for infobox in infobox_defs[1]:
-            src = infobox[0].strip(' \t\n\r')            
+            src = infobox[0].strip(' \t\n\r')
             name = infobox[1]
+            if name and len(name) > 2048:
+                name = name[0:2048]
             tag_infobox, created = TagInfobox.objects.get_or_create(tag=tag, name=name, revision_id = revision_id, defaults={'source': src})
             if not created:
                 tag_infobox.source = src
@@ -284,7 +286,7 @@
 
             src = COMMENT_PATTERN.sub('',src)
             src = START_PATTERN.sub('',src[:-2]).strip()
-            
+
             keyvalues = self.split_infoboxes(src)
 
             for key,value in itertools.izip(*[itertools.islice(keyvalues, i, None, 2) for i in range(2)]):
@@ -292,89 +294,89 @@
                 if not created:
                     param.param_value = value.strip()
                     param.save()
-        
+
     def handle_noargs(self, **options):
-        
+
         self.style = no_style()
-        
+
         interactive = options.get('interactive', True)
-        
+
         self.verbosity = int(options.get('verbosity', '1'))
         use_label = options.get('use_label', False)
-        
+
         force = options.get('force', False)
-        
+
         limit = options.get("limit", -1)
         start = options.get("start", 0)
-        
+
         site_url = options.get('site_url', settings.WIKIPEDIA_API_URL)
-        
+
         random = options.get('random', False)
-        
+
         types_mask = 0
         types_list = options.get('types', [])
-        
+
         if len(types_list) == 0:
             types_mask = TYPES_MASK_DICT['all']
         else:
             for t in types_list:
                 types_mask |=  TYPES_MASK_DICT[t]
-                
+
         if self.verbosity > 1 :
-            print "types mask %s " % (bin(types_mask))  
-        
+            print "types mask %s " % (bin(types_mask))
+
         if self.verbosity > 2:
             print "option passed : " + repr(options)
 
 
         queryset = Tag.objects.exclude(wikipedia_pageid= None)
-        
+
         tag_list = options.get("tags", []);
-        
+
         if tag_list:
             queryset = queryset.filter(label__in=tag_list)
-        elif not options.get('all',False):            
+        elif not options.get('all',False):
             queryset = queryset.annotate(wpc=Count('wp_categories')).filter(wpc = 0)
         #else:
-        #    queryset = Tag.objects.filter(url_status=None)                    
-        
+        #    queryset = Tag.objects.filter(url_status=None)
+
         if random:
             queryset = queryset.order_by("?")
         else:
             queryset = queryset.order_by("label")
-        
+
         if limit >= 0:
             queryset = queryset[start:limit]
         elif start > 0:
-            queryset = queryset[start:]            
-        
+            queryset = queryset[start:]
+
         if self.verbosity > 2 :
             print "Tag Query is %s" % (queryset.query)
-        
+
         site = wiki.Wiki(site_url) #@UndefinedVariable
-        
-        
+
+
         count = queryset.count()
         if self.verbosity > 1:
             print "Processing %d tags" % (count)
-        
+
         if not force and interactive:
             confirm = raw_input("You have requested to query and replace the wikipedia information for %d tags.\n Are you sure you want to do this? \nType 'yes' to continue, or 'no' to cancel: " % (count))
         else:
             confirm = 'yes'
-            
+
         if confirm != "yes":
             print "wikipedia query cancelled"
             return
 
-        
-        
+
+
         for i, tag in enumerate(queryset):
-            
+
             if self.verbosity > 1:
                 print "processing tag %s (%d/%d)" % (tag.label, i + 1, count)
             else:
-                utils.show_progress(i + 1, count, tag.label, 60)                            
+                utils.show_progress(i + 1, count, tag.label, 60)
 
             # query categories
             wikipedia_pageid = tag.label if use_label else tag.wikipedia_pageid
@@ -385,12 +387,11 @@
                 if types_mask & TYPES_MASK_DICT['visible']:
                     res = self.query_all_categories(False, site, wikipedia_pageid, use_label)
                     self.process_categories(res, False, tag)
-    
+
                 if types_mask & TYPES_MASK_DICT['hidden']:
                     res = self.query_all_categories(True, site, wikipedia_pageid, use_label)
                     self.process_categories(res, True, tag)
-                
+
                 if types_mask & TYPES_MASK_DICT['infobox']:
                     res = self.query_infoboxes(site, wikipedia_pageid, use_label)
                     self.process_infoboxes(res, tag)
-            
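
Note on query_wikipedia_category.py: besides whitespace cleanup, the file gains one guard in process_infoboxes: infobox names parsed out of wikitext are clipped to 2048 characters before the get_or_create call. That bound presumably matches a fixed-width column on the model; a sketch of the assumed constraint (the real TagInfobox definition is not part of this diff):

    from django.db import models

    class TagInfobox(models.Model):
        # Assumed shape: name bounded at 2048 chars, hence the truncation guard
        tag = models.ForeignKey('Tag')
        name = models.CharField(max_length=2048)
        revision_id = models.IntegerField()
        source = models.TextField()
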
--- a/src/hdalab/views/profile.py	Thu Sep 24 13:40:54 2015 +0200
+++ b/src/hdalab/views/profile.py	Sat Sep 26 11:55:11 2015 +0200
@@ -48,12 +48,12 @@
 
 
 class BaseRenkanList(TemplateView):
-    
+
     default_sort_field = "date"
     default_sort_order = "desc"
 
     def update_context(self, context, renkan_queryset):
-        
+
         filters = ""
         filter_title = self.request.GET.get("title", "")
         if(len(filter_title)>0):
@@ -105,32 +105,32 @@
             page = p.page(1)
         except EmptyPage:
             page = p.page(p.num_pages)
-        
-        context.update({"page": page, "sort_param":sort_param, "order_param":order_param, "opposite":opposite, 
+
+        context.update({"page": page, "sort_param":sort_param, "order_param":order_param, "opposite":opposite,
                         "filters":filters, "title": filter_title, "username": filter_username, "state": filter_state,
                         "startdate":filter_startdate, "enddate":filter_enddate, "favorite": filter_favorite})
-        
+
         return context
 
 
 
 class ProfileHome(BaseRenkanList):
-    
+
     template_name = "profile_home.html"
-    
+
     def get_context_data(self, **kwargs):
         return self.update_context( super(ProfileHome, self).get_context_data(**kwargs), HdalabRenkan.objects.select_related("renkan").filter(renkan__owner=self.request.user))
 
 
 class RenkanPublicList(BaseRenkanList):
-    
+
     template_name = "renkan_list.html"
-    
+
     def get_context_data(self, **kwargs):
         context = super(RenkanPublicList, self).get_context_data(**kwargs)
         #List of public renkans
         renkan_list = HdalabRenkan.objects
-        
+
         context['hide_favorite'] = True
         context['show_username'] = self.request.user.is_staff
         if self.request.user.is_staff:
@@ -142,13 +142,13 @@
 
 
 class RenkanFavoriteList(BaseRenkanList):
-    
+
     template_name = "renkan_list_favorite.html"
-    
+
     def get_context_data(self, **kwargs):
         context = super(RenkanFavoriteList, self).get_context_data(**kwargs)
         renkan_list = HdalabRenkan.objects
-        
+
         context['hide_favorite'] = True
         context['show_username'] = self.request.user.is_staff
         if self.request.user.is_staff:
@@ -160,7 +160,7 @@
 
 
 class RenkanNew(TemplateView):
-    
+
     template_name="renkan_new_confirm.html"
 
     def post(self, request):
@@ -169,7 +169,8 @@
         rk.rk_id = rk_id
         rk.owner = request.user
         rk.content = '{}'
-        rk.title = "Nouveau Renkan "
+        rk.title = "Nouveau Renkan"
+        rk.schema_version = "2"
         rk.save()
         hr = HdalabRenkan()
         hr.renkan = rk
@@ -179,9 +180,9 @@
 
 
 class RenkanEdit(TemplateView):
-    
+
     template_name="renkan_edit.html"
-    
+
     def get_context_data(self, **kwargs):
         switch_shape_url= {}
         context = super(RenkanEdit, self).get_context_data(**kwargs)
@@ -210,32 +211,32 @@
             context["switch_shape_url"] = switch_shape_url
         form = AuthenticationForm(self.request)
         context["form"] = form
-        
+
         return context
 
 
 
 class HdalabRenkanGetPut(RenkanGetPut):
-    
+
     @csrf_exempt
     def dispatch(self, *args, **kwargs):
         return super(HdalabRenkanGetPut, self).dispatch(*args, **kwargs)
-    
+
     def get(self, request):
-        
+
         # If a renkan id is set
         rk_id = request.GET.get("rk_id", "")
         if rk_id!="":
             rk = get_object_or_404(Renkan, rk_id=rk_id)
             return HttpResponse(rk.content, content_type="application/json")
-        
+
         shape = request.GET.get("shape", "")
         no_translate_langs = [ 'fr' ]
         lang = request.GET.get('lang',request.LANGUAGE_CODE)
-        
+
         # Start dict for renkan json
         now = datetime.now().strftime("%Y-%m-%d %H:%M")
-        
+
         content = {
           "id": unicode(uuid.uuid1()),
           "schema_version": 2,
@@ -248,22 +249,22 @@
           "views": [],
           "users": [],
         }
-        
+
         # category image dict
         cat_dict = {u"Créateur": static("hdalab/img/category_creator.png"),
                     u"Datation": static("hdalab/img/category_datation.png"),
                     u"Discipline artistique": static("hdalab/img/category_discipline.png"),
                     u"Localisation": static("hdalab/img/category_localisation.png"),
                     u"Ecole/Mouvement": static("hdalab/img/category_movement.png")}
-        
+
         # category image dict
         shapes = { "tag1": "polygon", "notice": "rectangle", "tag2": "star" }
-        
-        
+
+
         # Renkan Project ID
         project_id = unicode(uuid.uuid1())
-        
-        
+
+
         # If a notice id is set
         notice_id = request.GET.get("notice", "")
         if notice_id!="":
@@ -276,7 +277,7 @@
                 np = HorLineNodePlacer()
             elif shape=="vert":
                 np = LineNodePlacer()
-                
+
             np.init({"datasheet": (1, 1), "tags": (2, len(ordered_tags))})
             # Place notice :
             content["nodes"].append({
@@ -294,13 +295,13 @@
                 "shape": shapes["notice"]
             })
             notice_id = content["nodes"][0]["id"]
-            
+
             # Get translated labels
             translations = {}
             if lang not in no_translate_langs:
                 transqs = DbpediaFieldsTranslation.objects.filter(master__in = [ot.tag.dbpedia_fields if hasattr(ot.tag, 'dbpedia_fields') and ot.tag.dbpedia_fields else None for ot in ordered_tags], language_code = lang)
                 translations = dict([(trans.master_id,trans) for trans in transqs])
-            
+
             for ot in ordered_tags:
                 t = ot.tag
                 img_url = t.dbpedia_fields.thumbnail if hasattr(t, 'dbpedia_fields') and t.dbpedia_fields and t.dbpedia_fields.thumbnail else None
@@ -325,7 +326,7 @@
                   },
                   "shape": shapes["tag2"]
                 })
-                
+
             # Place edges
             for node in content["nodes"]:
                 content["edges"].append({
@@ -340,11 +341,11 @@
                     "to": node["id"],
                     "project_id": project_id,
                 })
-            
+
             response = json.dumps(content)
             return HttpResponse(response, content_type="application/json")
-        
-        
+
+
         # If a folder id is set
         folder_id = request.GET.get("folder", "")
         if folder_id!="":
@@ -363,23 +364,23 @@
                     n_tags.append(t)
             n_tags = [t.pk for t in n_tags]
             all_tags = Tag.objects.filter( pk__in=n_tags ).select_related("dbpedia_fields", "category")
-            
+
             # Get translated labels
             translations = {}
             if lang not in no_translate_langs:
                 transqs = DbpediaFieldsTranslation.objects.filter(master__in = [t.dbpedia_fields if hasattr(t, 'dbpedia_fields') and t.dbpedia_fields else None for t in all_tags], language_code = lang)
                 translations = dict([(trans.master_id,trans) for trans in transqs])
-            
+
             # Prepare Node placer :
             np = CircleNodePlacer()
             if shape=="horiz":
                 np = HorLineNodePlacer()
             elif shape=="vert":
                 np = LineNodePlacer()
-                
+
             np.init({"datasheet": (1, len(notices)), "tags": (2, len(all_tags))})
 
-            
+
             # Place notices
             for n in notices:
                 content["nodes"].append({
@@ -396,7 +397,7 @@
                   },
                   "shape": shapes["notice"]
                 })
-            
+
             # Place tags
             for t in all_tags:
                 img_url = t.dbpedia_fields.thumbnail if hasattr(t, 'dbpedia_fields') and t.dbpedia_fields and t.dbpedia_fields.thumbnail else None
@@ -422,7 +423,7 @@
                     },
                     "shape": shapes["tag2"]
                 })
-            
+
             # Place edges
             for n_pk in notice_tag_dict:
                 for tag_id in notice_tag_dict[n_pk]["tags"]:
@@ -439,11 +440,11 @@
                         "project_id": project_id,
                         #"created_by": "de68xf75y6hs5rgjhgghxbm217xk"
                     })
-            
+
             response = json.dumps(content)
             return HttpResponse(response, content_type="application/json")
-            
-        
+
+
         # Otherwise we build the data
         # Get tags and countries
         labels = request.GET.get("label", "").split(",")
@@ -453,12 +454,12 @@
         label_list = [t for t in labels if t!=""]
         country_list = [c for c in countries if c!=""]
         all_tags = Tag.objects.filter( Q(label__in=label_list) | Q(dbpedia_uri__in=country_list) ).select_related("dbpedia_fields", "category")
-        
+
         # Get datasheets from ajax filter search
         temp_filter = filter_generic(lang, period, ",".join(label_list), ",".join(country_list), content_count=18)
         filter_output = json.loads(temp_filter)
         filter_output_to_bin = json.loads(temp_filter)
-        
+
         #Keep only the first 8 resources to create the graph
         #the next 10 are sent to the bins
         for i in range(len(filter_output["contents"])):
@@ -472,7 +473,7 @@
           "title": _("Plus de Ressources"), #TODO: Translate
           "list": filter_output_to_bin["contents"]
         }
-        
+
         # Prepare other tags
         related_tags = []
         all_labels = [t.label for t in all_tags]
@@ -493,8 +494,8 @@
                     related_tags.append({"label": t["label"], "thumbnail":thumbnail_url, "id":t["id"], "url":t["url"], 'wkpd_url': t['wkpd_url']})
                     all_labels.append(t["label"])
                 related_tags_dict[c["id"]].append(t["id"])
-        
-        
+
+
         # If possible, we search a dbpedia_fields thumbnail or category thumbnail for related tags
         r_tags = [t["label"] for t in related_tags if t["thumbnail"] is None or t["thumbnail"]=="" ]
         r_tags = Tag.objects.filter( label__in=r_tags ).select_related("dbpedia_fields", "category")
@@ -509,28 +510,28 @@
         for t in related_tags:
             if (t["thumbnail"] is None or t["thumbnail"]=="") and (t["label"] in r_tags_dict):
                 t["thumbnail"] = r_tags_dict[t["label"]]
-        
-        
+
+
         # Prepare Node placer :
         np = CircleNodePlacer()
         if shape=="horiz":
             np = HorLineNodePlacer()
         elif shape=="vert":
             np = LineNodePlacer()
-            
+
         len_tags = len(all_tags)
         if period:
             len_tags += 1
         np.init({"tags": (1, len_tags), "datasheet": (2, len(filter_output["contents"])), "related": (3, len(related_tags)), "northwest":(3 if shape=="circle" else 1, 1)})
-        
+
         #get tag abstract and label translations
-        
+
         tags_id = [t.id for t in all_tags] + [t['id'] for t in related_tags]
         translations = {}
         transqs = DbpediaFieldsTranslation.objects.filter(master__tag__in = tags_id, language_code = lang)
         translations = dict([(trans.master.tag.id,trans) for trans in transqs])
 
-        
+
         for t in all_tags:
             img_url = t.dbpedia_fields.thumbnail if hasattr(t, 'dbpedia_fields') and t.dbpedia_fields and t.dbpedia_fields.thumbnail else None
             if img_url is None and t.category is not None:
@@ -540,7 +541,7 @@
                 'label': filter_output["tagtranslations"][t.label] if t.label in filter_output["tagtranslations"] else t.label,
                 'abstract': translation_obj.abstract if translation_obj else ""
             }
-            
+
             content["nodes"].append({
               "id": unicode(uuid.uuid1()),
               "title": translation['label'],
@@ -570,7 +571,7 @@
               },
               "shape": shapes["tag1"]
             })
-        
+
         for c in filter_output["contents"]:
             content["nodes"].append({
               "id": c["id"],
@@ -586,7 +587,7 @@
               },
               "shape": shapes["notice"]
             })
-        
+
         for t in related_tags:
             translation_obj = translations.get(t['id'], None)
             translation = {
@@ -608,7 +609,7 @@
               },
               "shape": shapes["tag2"]
             })
-        
+
         for c_id in related_tags_dict:
             for tag_id in related_tags_dict[c_id]:
                 content["edges"].append({
@@ -624,14 +625,14 @@
                     "project_id": project_id,
                     #"created_by": "de68xf75y6hs5rgjhgghxbm217xk"
                 })
-        
+
         response = json.dumps(content)
-        
+
         return HttpResponse(response, content_type="application/json")
-    
-    
+
+
     def post(self, request):
-        
+
         rk_id = request.GET.get("rk_id", "")
         #data = json.loads(request.body)
         #logger.debug(data["edges"])
@@ -666,14 +667,14 @@
                     hr.state = HdalabRenkan.EDITION
                     hr.save()
                     return HttpResponse("rk_id=" + rk_id)
-         
-            
+
+
         return HttpResponse("NOT SAVED")
 
 
 
 class HdalabRenkanCopy(View):
-    
+
     def post(self, request, rk_id):
         rk = renkan_copier(request.user, rk_id)
         hr = HdalabRenkan()
@@ -686,7 +687,7 @@
 
 
 class HdalabRenkanDelete(View):
-    
+
     def post(self, request, rk_id):
         try:
             hr = HdalabRenkan.objects.get(renkan__rk_id=rk_id)
@@ -700,7 +701,7 @@
 
 
 class HdalabRenkanModerate(View):
-    
+
     def post(self, request, rk_id):
         form = HdalabRenkanStateForm(request.POST)
         if form.is_valid():
@@ -717,7 +718,7 @@
             return HttpResponseBadRequest("State form invalid")
 
 class HdalabRenkanFavorite(View):
-    
+
     def post(self, request, rk_id):
         form = HdalabRenkanFavoriteForm(request.POST)
         if form.is_valid():
@@ -736,10 +737,10 @@
 
     def get_object(self, queryset=None):
         return self.request.user
-    
+
     def get_success_url(self):
         return reverse('profile_home')
-    
+
 
 # Function copied from django.contrib.auth.views to simplify ajax login
 @sensitive_post_parameters()
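
Note on profile.py: two behavioural changes ride along with the whitespace cleanup. RenkanNew.post drops the stray trailing space from the default title ("Nouveau Renkan ") and stamps new renkans with schema_version "2", matching the "schema_version": 2 field already emitted by the JSON generator in HdalabRenkanGetPut.get. A sketch of the updated creation path (make_rk_id is a hypothetical stand-in; the hunk does not show where rk_id comes from):

    def post(self, request):
        # RenkanNew.post after this changeset (sketch)
        rk = Renkan()
        rk.rk_id = make_rk_id()       # hypothetical: id generation not shown
        rk.owner = request.user
        rk.content = '{}'
        rk.title = "Nouveau Renkan"   # trailing space removed
        rk.schema_version = "2"       # stored renkans now match schema_version 2
        rk.save()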