--- a/.hgtags Wed Feb 27 18:22:42 2013 +0100
+++ b/.hgtags Mon May 13 11:27:04 2013 +0200
@@ -19,3 +19,14 @@
a73f1e6e1eed53bfdc8c17a0e014c579937bc888 V02.08.02
89e5ee57bd89ad5858848ad5c69950164d074a0c V02.08.03
ccca12eaa406a5138ce67a9870c58cf4ff9560cc V02.08.04
+eb20d3d467bedcd848a40f632cacc461c86521af V02.08.05
+188306b32f74b5d0b7bb643d7c169d62c67c2e1d V02.08.06
+0f2fe357ceea06884cf5d75413c66b976268fe54 V02.08.07
+d9c379ebcc5498a485ac7e1ab4167db5025c8eb6 V02.08.08
+d9c379ebcc5498a485ac7e1ab4167db5025c8eb6 V02.08.08
+0ad5b530b3069c97990bbea9437c55448b7a3ca3 V02.08.08
+95d0458aede85dd0b12290d513ae18c61bcfb4df V02.08.09
+11ff64324cc72598a70d53d9fe1180ded5d60d75 V02.08.10
+8a667bb502993b9aca9e41f04d5166396b3d8ad1 V02.08.11
+224c3a44056b54fb2c6d87f7033c7288db47dcf9 V02.08.12
+d027f7adad037575e353d87f8946bd872e8e5054 V02.08.13
--- a/src/ldtplatform/__init__.py Wed Feb 27 18:22:42 2013 +0100
+++ b/src/ldtplatform/__init__.py Mon May 13 11:27:04 2013 +0200
@@ -1,4 +1,4 @@
-VERSION = (2, 8, 4, "final", 0)
+VERSION = (2, 8, 13, "final", 0)
VERSION_STR = unicode(".".join(map(lambda i:"%02d" % (i,), VERSION[:2])))
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/ldtplatform/management/__init__.py Mon May 13 11:27:04 2013 +0200
@@ -0,0 +1,1 @@
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/ldtplatform/management/commands/__init__.py Mon May 13 11:27:04 2013 +0200
@@ -0,0 +1,1 @@
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/ldtplatform/management/commands/loadandadddata.py Mon May 13 11:27:04 2013 +0200
@@ -0,0 +1,288 @@
+# -*- coding: utf-8 -*-
+'''
+Created on Mar 22, 2013
+
+@author: tc
+'''
+
+from django.contrib.auth.models import User, Group
+from django.core.management import call_command
+from django.core.management.base import BaseCommand, CommandError
+from ldt.ldt_utils.models import Content, Project
+from optparse import make_option
+import os.path
+import json
+from ldt.security.cache import cached_assign
+
+
+class Command(BaseCommand):
+ '''
+ Load users, medias, contents, project and guardian permissions from json file generated by dumpdata
+ '''
+
+ args = 'json_file'
+ help = 'Load users, medias, contents, project and guardian permissions from json file generated by dumpdata'
+
+ option_list = BaseCommand.option_list + (
+ make_option('-u', '--ignore-users',
+ dest= 'ignore_users',
+ default= None,
+ help= 'list of usernames to ignore (separated by comma)'
+ ),
+ make_option('-g', '--ignore-groups',
+ dest= 'ignore_groups',
+ default= None,
+ help= 'list of group names to ignore (separated by comma)'
+ ),
+ make_option('-c', '--ignore-contents',
+ dest= 'ignore_contents',
+ default= None,
+ help= 'list of content iri_id to ignore (separated by comma)'
+ ),
+ make_option('-n', '--new-group',
+ dest= 'new_group',
+ default= None,
+ help= 'The script will create a new group and assign view permission for all new media/contents to all the new users'
+ ),
+ )
+
+
+ def __safe_get(self, dict_arg, key, conv = lambda x: x, default= None):
+ val = dict_arg.get(key, default)
+ return conv(val) if val else default
+
+ def __safe_decode(self, s):
+ if not isinstance(s, basestring):
+ return s
+ try:
+ return s.decode('utf8')
+ except:
+ try:
+ return s.decode('latin1')
+ except:
+ return s.decode('utf8','replace')
+
+ def handle(self, *args, **options):
+
+ # Test path
+ if len(args) != 1:
+ raise CommandError("The command has no argument or too much arguments. Only one is needed : the json file path.")
+
+ # Check if temporary files already exist
+ path = os.path.abspath(args[0])
+ dir = os.path.dirname(path)
+ path_file1 = os.path.join(dir, 'temp_data1.json')
+ path_file2 = os.path.join(dir, 'temp_data2.json')
+ do_import = True
+ if os.path.exists(path_file1) or os.path.exists(path_file2):
+ confirm = raw_input(("""
+ The folder %s contains the files temp_data1.json or temp_data2.json. These files will be overwritten.
+
+ Do you want to continue ?
+
+ Type 'y' to continue, or 'n' to quit: """) % dir)
+ do_import = (confirm == "y")
+
+ # Continue
+ if do_import:
+ # Init ignore list
+ user_ignore_list = ["admin","AnonymousUser"]
+ group_ignore_list = ["everyone","Hashcut IRI","Hashcut BPI"]
+ content_ignore_list = []
+
+ # Update ignore list
+ ignore_users = options.get('ignore_users', None)
+ ignore_groups = options.get('ignore_groups', None)
+ ignore_contents = options.get('ignore_contents', None)
+ if ignore_users:
+ for u in ignore_users.split(","):
+ user_ignore_list.append(u)
+ if ignore_groups:
+ for g in ignore_groups.split(","):
+ group_ignore_list.append(g)
+ if ignore_contents:
+ for c in ignore_contents.split(","):
+ content_ignore_list.append(c)
+
+ # Begin work...
+ print("Opening file...")
+ json_file = open(path,'rb')
+ print("Loading datas...")
+ data = json.load(json_file)
+ print("%d objects found..." % len(data))
+ content_pk_id = {}
+ project_pk_id = {}
+ # datas for file 1 : users, medias, contents, projects
+ data_file1 = []
+ # datas for file 2 : guardian permissions
+ data_file2 = []
+ # users
+ usernames = []
+ for obj in data:
+ if "model" in obj:
+ m = obj["model"]
+ if m!="guardian.userobjectpermission" and m!="guardian.groupobjectpermission":
+ # We remove user admin, user AnonymousUser, group everyone and users and contents in ignore list
+ # (a bit fuzzy for media and src but good for others)
+ if not ((m=="auth.user" and "username" in obj["fields"] and obj["fields"]["username"] in user_ignore_list) or \
+ (m=="auth.group" and "name" in obj["fields"] and obj["fields"]["name"] in group_ignore_list) or \
+ (m=="ldt_utils.media" and "src" in obj["fields"] and any((s+".") in obj["fields"]["src"] for s in content_ignore_list)) or \
+ (m=="ldt_utils.content" and "iri_id" in obj["fields"] and obj["fields"]["iri_id"] in content_ignore_list)):
+ data_file1.append(obj)
+ #else:
+ # print("I don't keep from datas %s, pk = %s" % (m, obj["pk"]))
+ if "pk" in obj:
+ # For both contents and projects, we save 2 dicts [id]=pk and [pk]=id
+ # It will enable to parse and replace easily the old pk by the new ones in the permission datas
+ if m=="ldt_utils.project":
+ pk = str(obj["pk"])
+ id = obj["fields"]["ldt_id"]
+ project_pk_id[pk] = id
+ elif m=="ldt_utils.content":
+ pk = str(obj["pk"])
+ id = obj["fields"]["iri_id"]
+ content_pk_id[pk] = id
+ obj["pk"] = None
+ else:
+ obj["pk"] = None
+ data_file2.append(obj)
+ # Save usernames except AnonymousUser
+ if m=="auth.user" and "username" in obj["fields"] and obj["fields"]["username"]!="AnonymousUser":
+ usernames.append(obj["fields"]["username"])
+ json_file.close()
+ #data_file1.append(project_pk_id)
+ #data_file1.append(project_id_pk)
+ #data_file1.append(content_pk_id)
+ #data_file1.append(content_id_pk)
+
+ # Check if import will fail with the usernames
+ existing_usernames = User.objects.all().values_list("username", flat=True)
+ for un in usernames:
+ if un in existing_usernames and un not in user_ignore_list:
+ print("import will fail with username : %s" % str(un))
+ do_import = False
+
+ # Check if import will fail with the contents's iri_id
+ existing_iri_ids = Content.objects.all().values_list("iri_id", flat=True)
+ new_iri_ids = list(content_pk_id.values())
+ for iri_id in new_iri_ids:
+ if iri_id in existing_iri_ids and iri_id not in content_ignore_list:
+ print("import will fail with iri_id : %s" % str(iri_id))
+ do_import = False
+ if not do_import:
+ print("Add the usernames and iri_id to the ignore parameters -u and -c")
+ return ""
+
+ # We save the datas in a file in order to simply call loaddata
+ print("Writing %s..." % path_file1)
+ file1 = open(path_file1, 'w')
+ json.dump(data_file1, file1, indent=2)
+ file1.close()
+ print("Updating permissions ids...")
+ # We replace the old pk by the natural keys in the permission datas
+ ignored_project_pks = []
+ ignored_content_pks = []
+ perm_data = []
+ for obj in data_file2:
+ type = obj["fields"]["content_type"][1]
+ old_pk = obj["fields"]["object_pk"]
+ if type=="project":
+ try:
+ obj["fields"]["object_pk"] = project_pk_id[old_pk]
+ except:
+ # The dumpdata can contain permissions for removed projects
+ ignored_project_pks.append(old_pk)
+ continue
+                            # Keeping only valuable objs avoids errors when we get the new pks
+ perm_data.append(obj)
+ elif type == "content":
+ try:
+ obj["fields"]["object_pk"] = content_pk_id[old_pk]
+ except:
+ # The dumpdata can contain permissions for removed contents
+ ignored_content_pks.append(old_pk)
+ continue
+                            # Keeping only valuable objs avoids errors when we get the new pks
+ obj_id = obj["fields"]["object_pk"]
+ model = obj["model"] # "guardian.groupobjectpermission" or "guardian.userobjectpermission"
+ if obj_id in content_ignore_list:
+ if model=="guardian.groupobjectpermission":
+ if obj["fields"]["group"][0] in group_ignore_list:
+ #print("permissions : j'ignore %s pour le groupe %s ..." % (obj_id, obj["fields"]["group"][0]))
+ continue
+ elif model=="guardian.userobjectpermission":
+ if obj["fields"]["user"][0] in user_ignore_list:
+ #print("permissions : j'ignore %s pour le user %s ..." % (obj_id, obj["fields"]["user"][0]))
+ continue
+ perm_data.append(obj)
+ # We inform the user
+ print("%d project permissions were ignored because projects do not exist in the current datas." % len(ignored_project_pks))
+ print("%d content permissions were ignored because contents do not exist in the current datas." % len(ignored_content_pks))
+ print("Loading datas from temporary file %s ..." % path_file1)
+ # Loaddata from file 1
+ call_command("loaddata", path_file1)
+
+ # Now users, medias, contents, projects have been saved.
+ # We can get the new pk for contents and projects
+            # Careful: in Python 3, dict.copy().values() will be preferred to list(dict.values())
+            # We use select_related("media_obj") because it will be useful with the new group
+ contents = Content.objects.filter(iri_id__in=list(content_pk_id.values())).select_related("media_obj")#.values('pk', 'iri_id')
+ content_id_pk = {}
+ for c in contents:
+ content_id_pk[c.iri_id] = str(c.pk)
+ projects = Project.objects.filter(ldt_id__in=list(project_pk_id.values())).values('pk', 'ldt_id')
+ project_id_pk = {}
+ for p in projects:
+ project_id_pk[p["ldt_id"]] = str(p["pk"])
+
+ # Now we reparse the perm_data and update with the new pks
+ for obj in perm_data:
+ type = obj["fields"]["content_type"][1]
+ obj_id = obj["fields"]["object_pk"]
+ if type=="project":
+ obj["fields"]["object_pk"] = project_id_pk[obj_id]
+ elif type == "content":
+ obj["fields"]["object_pk"] = content_id_pk[obj_id]
+
+
+ # We save the datas in a file in order to simply call loaddata
+ print("Writing %s..." % path_file2)
+ file2 = open(path_file2, 'w')
+ json.dump(perm_data, file2, indent=2)
+ file2.close()
+ print("Loading permissions from temporary file %s ..." % path_file2)
+ call_command("loaddata", path_file2)
+
+ # Remove temp files
+ print("Removing temporary files...")
+ try:
+ os.remove(path_file1)
+ except:
+ print("Removing temporary files %s failed" % path_file1)
+ try:
+ os.remove(path_file2)
+ except:
+ print("Removing temporary files %s failed" % path_file2)
+
+ # Now that all datas have been imported we can create the new group and assign permissions if asked
+ new_group = options.get('new_group', None)
+ if new_group and len(usernames)>0:
+ print("Set view permissions for the new group %s ..." % new_group)
+ # Get or create group
+ new_grp, _ = Group.objects.get_or_create(name=new_group)
+ # Add users to the group
+ users = User.objects.filter(username__in=usernames)
+ for u in users:
+ new_grp.user_set.add(u)
+ # Get all contents and medias
+ for c in contents:
+ cached_assign('view_content', new_grp, c)
+ cached_assign('view_media', new_grp, c.media_obj)
+
+ print("Indexing imported projects ...")
+ call_command('reindex', projects=True, no_content=True)
+
+ # This is the end
+ print("This is the end")
+
+
\ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/ldtplatform/migrations/0001_initial.py Mon May 13 11:27:04 2013 +0200
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+import datetime
+from south.db import db
+from south.v2 import SchemaMigration
+from django.db import models
+
+
+class Migration(SchemaMigration):
+
+ def forwards(self, orm):
+ pass
+
+ def backwards(self, orm):
+ pass
+
+ models = {
+
+ }
+
+ complete_apps = ['ldtplatform']
\ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/ldtplatform/migrations/0002_iri_url_update.py Mon May 13 11:27:04 2013 +0200
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+import datetime
+from south.db import db
+from south.v2 import DataMigration
+from django.db import models
+from django.core.management import call_command
+
+class Migration(DataMigration):
+
+ def forwards(self, orm):
+ # Call the command from ldt_utils app
+ call_command('updateiriurlinprojects')
+
+ def backwards(self, orm):
+ "Write your backwards methods here."
+
+ models = {
+
+ }
+
+ complete_apps = ['ldtplatform']
+ symmetrical = True
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/ldtplatform/migrations/0003_recalculate_contentstat.py Mon May 13 11:27:04 2013 +0200
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+import datetime
+from south.db import db
+from south.v2 import DataMigration
+from django.db import models
+from django.core.management import call_command
+
+class Migration(DataMigration):
+
+ def forwards(self, orm):
+ # Call the command from ldt_utils app
+ call_command('statannotation')
+
+ def backwards(self, orm):
+ "Write your backwards methods here."
+
+ models = {
+
+ }
+
+ complete_apps = ['ldtplatform']
+ symmetrical = True
--- a/src/ldtplatform/settings.py Wed Feb 27 18:22:42 2013 +0100
+++ b/src/ldtplatform/settings.py Mon May 13 11:27:04 2013 +0200
@@ -188,6 +188,7 @@
LDT_MAX_CONTENTS_PER_PAGE = 10
LDT_MAX_PROJECTS_PER_PAGE = 10
LDT_FRONT_MEDIA_PER_PAGE = 9
+LDT_FRONT_PROJECTS_PER_PAGE = 12
OAUTH_PROVIDER_KEY_SIZE = 32
OAUTH_PROVIDER_SECRET_SIZE = 32
--- a/src/requirement.txt Wed Feb 27 18:22:42 2013 +0100
+++ b/src/requirement.txt Mon May 13 11:27:04 2013 +0200
@@ -1,3 +1,3 @@
-ldt (==1.46)
+ldt (==1.46.9)
hashcut (==0.9)
django-chunked-uploads (==HEAD)