--- a/src/spel/management/commands/loadspeldata.py Tue Mar 04 14:48:25 2014 +0100
+++ b/src/spel/management/commands/loadspeldata.py Thu Mar 06 16:28:30 2014 +0100
@@ -5,19 +5,25 @@
@author: tc
'''
+from datetime import datetime
from dircache import listdir
-from django.contrib.auth.models import User, Group
-from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from genericpath import isdir, isfile
-from ldt.ldt_utils.models import Content, Project
+from ldt.api.ldt.serializers.cinelabserializer import CinelabSerializer
+from ldt.ldt_utils.models import Media, Content, Project
+from ldt.ldt_utils.stat import update_stat_content
+from ldt.ldt_utils.utils import generate_uuid
+from ldt.security import set_current_user, get_current_user_or_admin
from ldt.security.cache import cached_assign
-from ldt.ldt_utils.models import Media, Content, Project
from optparse import make_option
from os.path import join
import json
+import lxml.etree
import os.path
-from ldt.ldt_utils.utils import generate_uuid
+import time
+
+import logging
+logger = logging.getLogger(__name__)
class Command(BaseCommand):
@@ -74,220 +80,135 @@
json_path = join(path,f,"cinelab.json")
if isfile(json_path):
print("Parsing json file %s ..." % json_path)
+ json_data = False
try:
file_data = open(json_path)
json_data = json.load(file_data)
file_data.close()
except:
- print(" Error while parsing json file.")
-
- # Save media
- media, _ = Media.objects.get_or_create(src=f+"/original.mp4", duration=json_data["medias"][0]["meta"]["duration"])
- ctt_id = generate_uuid()
- content = Content.objects.create(iri_id=ctt_id, iriurl=ctt_id+u"/"+ctt_id+u".iri", media_obj=media, title=json_data["meta"]["dc:title"])
-
-
-
+ print("Error while parsing json file.")
+ if json_data:
+ # Save media and content
+ media, _ = Media.objects.get_or_create(src=f+"/original.mp4", duration=json_data["medias"][0]["meta"]["duration"])
+ media.is_public = True
+ ctt_id = generate_uuid()
+ content = Content.objects.create(iri_id=ctt_id,
+ iriurl=ctt_id+u"/"+ctt_id+u".iri",
+ media_obj=media,
+ title=json_data["meta"]["dc:title"],
+ duration=json_data["medias"][0]["meta"]["duration"],
+ content_creation_date = json_data["meta"]["dc:created"])
+ content.is_public = True
+                    # Get content front project
+ proj = content.front_project
+ username = proj.owner.username
+ now = datetime.utcnow().isoformat()
+ # Start data to send to api
+ proj_data = {}
+ proj_data["meta"] = {}
+ proj_data["meta"]["id"] = proj.ldt_id
+ proj_data["meta"]["dc:title"] = proj.title
+ proj_data["meta"]["dc:creator"] = username
+ proj_data["meta"]["dc:description"] = "description added"
+ proj_data["meta"]["dc:created"] = json_data["meta"]["dc:created"]
+ proj_data["meta"]["dc:modified"] = json_data["meta"]["dc:modified"]
+ proj_data["meta"]["dc:contributor"] = username
+ proj_data["medias"] = []
+ proj_data["medias"].append({"id": content.iri_id})
+ # The tags and annotations (main part)
+ proj_data["tags"] = []
+ proj_data["annotations"] = []
+ tags_id_label = {}
+ tags_label_id = {}
+ for a in json_data["annotations"]:
+ # "content": { "data": { "modalites_sceniques": "costumes,décors",... } }
+ if type(a["content"]["data"]) == type(dict()):
+ annot_tags = []
+ desc = ""
+ # Build tags
+ for k,v in a["content"]["data"].iteritems():
+ if k!="commentaire":
+ v = v.split(",")
+ for val in v:
+ tag_label = k + u": " + val.strip()
+ if val.strip()!="":
+ if not tag_label in tags_label_id:
+ tags_label_id[tag_label] = generate_uuid()
+ tags_id_label[tags_label_id[tag_label]] = tag_label
+ #logger.debug("CREATED")
+ #logger.debug(tags_label_id[tag_label] + " = " + tags_id_label[tags_label_id[tag_label]])
+ proj_data["tags"].append({"meta": { "dc:title": tag_label }, "id": tags_label_id[tag_label] })
+ annot_tags.append({"id-ref": tags_label_id[tag_label] })
+ else:
+ desc = v
+ # Build annotation with needed fields
+ proj_data["annotations"].append({
+ "content": {
+ "mimetype": "application/x-ldt-structured",
+ "description": desc,
+# "img": {
+# "src": ""
+# },
+ "title": a["id"],
+# "polemics": [ ],
+ },
+ "begin": a["begin"],
+ "meta": {
+# "dc:contributor": "admin",
+ "id-ref": a["type"],
+ "dc:created": now,
+# "dc:modified": "2014-03-04T16:40:23.609971",
+ "dc:creator": username
+ },
+ "end": a["end"],
+ "tags": annot_tags,
+ "color": "16763904",
+ "media": ctt_id,
+ "id": a["id"]
+ })
+
+ # The annotation-types
+ proj_data["annotation-types"] = []
+ at_ids = []
+ for at in json_data["annotation_types"]:
+ proj_data["annotation-types"].append({
+# dc:contributor: "admin",
+ "dc:creator": username,
+ "dc:title": at["id"],
+ "id": at["id"],
+# dc:created: "2014-03-04T14:51:13.907674",
+ "dc:description": ""
+# dc:modified: "2014-03-04T14:51:13.907674"
+ })
+ at_ids.append({ "id-ref": at["id"] })
+ # The list of annotation-types
+ list_id = generate_uuid()
+ proj_data["lists"] = [{
+ "items": at_ids,
+ "meta": {
+ "dc:creator": username,
+ "id-ref": ctt_id,
+ "dc:title": "SPEL",
+ "dc:description": ""
+ },
+ "id": list_id
+ }]
+ # The views for default display
+ proj_data["views"] = [{
+ "id": generate_uuid(),
+ "contents": [ ctt_id ],
+ "annotation_types": [atid["id-ref"] for atid in at_ids]
+ }]
+
+ serializr = CinelabSerializer()
+ serializr.validate_cinelab_json(proj_data)
+ ldt_xml = serializr.cinelab_to_ldt(proj_data)
+ proj.ldt = lxml.etree.tostring(ldt_xml, pretty_print=True)
+ #logger.debug(proj.ldt)
+ proj.save()
+ update_stat_content(content)
else:
print("Ignoring or not exist %s ..." % json_path)
- """
- # Init ignore list
- user_ignore_list = ["admin","AnonymousUser"]
- group_ignore_list = ["everyone","Hashcut IRI","Hashcut BPI"]
- content_ignore_list = []
-
- # Update ignore list
- ignore_users = options.get('ignore_users', None)
- ignore_groups = options.get('ignore_groups', None)
- ignore_contents = options.get('ignore_contents', None)
- if ignore_users:
- for u in ignore_users.split(","):
- user_ignore_list.append(u)
- if ignore_groups:
- for g in ignore_groups.split(","):
- group_ignore_list.append(g)
- if ignore_contents:
- for c in ignore_contents.split(","):
- content_ignore_list.append(c)
-
- # Begin work...
- print("Opening file...")
- json_file = open(path,'rb')
- print("Loading datas...")
- data = json.load(json_file)
- print("%d objects found..." % len(data))
- content_pk_id = {}
- project_pk_id = {}
- # datas for file 1 : users, medias, contents, projects
- data_file1 = []
- # datas for file 2 : guardian permissions
- data_file2 = []
- # users
- usernames = []
- for obj in data:
- if "model" in obj:
- m = obj["model"]
- if m!="guardian.userobjectpermission" and m!="guardian.groupobjectpermission":
- # We remove user admin, user AnonymousUser, group everyone and users and contents in ignore list
- # (a bit fuzzy for media and src but good for others)
- if not ((m=="auth.user" and "username" in obj["fields"] and obj["fields"]["username"] in user_ignore_list) or \
- (m=="auth.group" and "name" in obj["fields"] and obj["fields"]["name"] in group_ignore_list) or \
- (m=="ldt_utils.media" and "src" in obj["fields"] and any((s+".") in obj["fields"]["src"] for s in content_ignore_list)) or \
- (m=="ldt_utils.content" and "iri_id" in obj["fields"] and obj["fields"]["iri_id"] in content_ignore_list)):
- data_file1.append(obj)
- #else:
- # print("I don't keep from datas %s, pk = %s" % (m, obj["pk"]))
- if "pk" in obj:
- # For both contents and projects, we save 2 dicts [id]=pk and [pk]=id
- # It will enable to parse and replace easily the old pk by the new ones in the permission datas
- if m=="ldt_utils.project":
- pk = str(obj["pk"])
- ldt_id = obj["fields"]["ldt_id"]
- project_pk_id[pk] = ldt_id
- elif m=="ldt_utils.content":
- pk = str(obj["pk"])
- ldt_id = obj["fields"]["iri_id"]
- content_pk_id[pk] = ldt_id
- obj["pk"] = None
- else:
- obj["pk"] = None
- data_file2.append(obj)
- # Save usernames except AnonymousUser
- if m=="auth.user" and "username" in obj["fields"] and obj["fields"]["username"]!="AnonymousUser":
- usernames.append(obj["fields"]["username"])
- json_file.close()
- #data_file1.append(project_pk_id)
- #data_file1.append(project_id_pk)
- #data_file1.append(content_pk_id)
- #data_file1.append(content_id_pk)
-
- # Check if import will fail with the usernames
- existing_usernames = User.objects.all().values_list("username", flat=True)
- for un in usernames:
- if un in existing_usernames and un not in user_ignore_list:
- print("import will fail with username : %s" % str(un))
- do_import = False
-
- # Check if import will fail with the contents's iri_id
- existing_iri_ids = Content.objects.all().values_list("iri_id", flat=True)
- new_iri_ids = list(content_pk_id.values())
- for iri_id in new_iri_ids:
- if iri_id in existing_iri_ids and iri_id not in content_ignore_list:
- print("import will fail with iri_id : %s" % str(iri_id))
- do_import = False
- if not do_import:
- print("Add the usernames and iri_id to the ignore parameters -u and -c")
- return ""
-
- # We save the datas in a file in order to simply call loaddata
- print("Writing %s..." % path_file1)
- file1 = open(path_file1, 'w')
- json.dump(data_file1, file1, indent=2)
- file1.close()
- print("Updating permissions ids...")
- # We replace the old pk by the natural keys in the permission datas
- ignored_project_pks = []
- ignored_content_pks = []
- perm_data = []
- for obj in data_file2:
- content_type = obj["fields"]["content_type"][1]
- old_pk = obj["fields"]["object_pk"]
- if content_type =="project":
- try:
- obj["fields"]["object_pk"] = project_pk_id[old_pk]
- except:
- # The dumpdata can contain permissions for removed projects
- ignored_project_pks.append(old_pk)
- continue
- # Keeping only valuables objs avoids errors when we we get the new pks
- perm_data.append(obj)
- elif content_type == "content":
- try:
- obj["fields"]["object_pk"] = content_pk_id[old_pk]
- except:
- # The dumpdata can contain permissions for removed contents
- ignored_content_pks.append(old_pk)
- continue
- # Keeping only valuables objs avoids errors when we we get the new pks
- obj_id = obj["fields"]["object_pk"]
- model = obj["model"] # "guardian.groupobjectpermission" or "guardian.userobjectpermission"
- if obj_id in content_ignore_list:
- if model=="guardian.groupobjectpermission":
- if obj["fields"]["group"][0] in group_ignore_list:
- #print("permissions : j'ignore %s pour le groupe %s ..." % (obj_id, obj["fields"]["group"][0]))
- continue
- elif model=="guardian.userobjectpermission":
- if obj["fields"]["user"][0] in user_ignore_list:
- #print("permissions : j'ignore %s pour le user %s ..." % (obj_id, obj["fields"]["user"][0]))
- continue
- perm_data.append(obj)
- # We inform the user
- print("%d project permissions were ignored because projects do not exist in the current datas." % len(ignored_project_pks))
- print("%d content permissions were ignored because contents do not exist in the current datas." % len(ignored_content_pks))
- print("Loading datas from temporary file %s ..." % path_file1)
- # Loaddata from file 1
- call_command("loaddata", path_file1)
-
- # Now users, medias, contents, projects have been saved.
- # We can get the new pk for contents and projects
- # Careful: in Python 3, dict.copy().values() will be prefered to list(dict.values())
- # We use select_related("media_obj") because it will usefull with the new group
- contents = Content.objects.filter(iri_id__in=list(content_pk_id.values())).select_related("media_obj")#.values('pk', 'iri_id')
- content_id_pk = {}
- for c in contents:
- content_id_pk[c.iri_id] = str(c.pk)
- projects = Project.objects.filter(ldt_id__in=list(project_pk_id.values())).values('pk', 'ldt_id')
- project_id_pk = {}
- for p in projects:
- project_id_pk[p["ldt_id"]] = str(p["pk"])
-
- # Now we reparse the perm_data and update with the new pks
- for obj in perm_data:
- content_type = obj["fields"]["content_type"][1]
- obj_id = obj["fields"]["object_pk"]
- if content_type=="project":
- obj["fields"]["object_pk"] = project_id_pk[obj_id]
- elif content_type == "content":
- obj["fields"]["object_pk"] = content_id_pk[obj_id]
-
-
- # We save the datas in a file in order to simply call loaddata
- print("Writing %s..." % path_file2)
- file2 = open(path_file2, 'w')
- json.dump(perm_data, file2, indent=2)
- file2.close()
- print("Loading permissions from temporary file %s ..." % path_file2)
- call_command("loaddata", path_file2)
-
- # Remove temp files
- print("Removing temporary files...")
- try:
- os.remove(path_file1)
- except:
- print("Removing temporary files %s failed" % path_file1)
- try:
- os.remove(path_file2)
- except:
- print("Removing temporary files %s failed" % path_file2)
-
- # Now that all datas have been imported we can create the new group and assign permissions if asked
- new_group = options.get('new_group', None)
- if new_group and len(usernames)>0:
- print("Set view permissions for the new group %s ..." % new_group)
- # Get or create group
- new_grp, _ = Group.objects.get_or_create(name=new_group)
- # Add users to the group
- users = User.objects.filter(username__in=usernames)
- for u in users:
- new_grp.user_set.add(u)
- # Get all contents and medias
- for c in contents:
- cached_assign('view_content', new_grp, c)
- cached_assign('view_media', new_grp, c.media_obj)
-
- print("Indexing imported projects ...")
- call_command('reindex', projects=True, no_content=True)
- """
# This is the end
print("This is the end")
--- a/src/spel/static/spel/js/spectacle.js Tue Mar 04 14:48:25 2014 +0100
+++ b/src/spel/static/spel/js/spectacle.js Thu Mar 06 16:28:30 2014 +0100
@@ -39,27 +39,42 @@
}
});
-
+ // Data 1
var data1 = [
- {label: "Performance", value: "Performance"},
- {label: "Discussion", value: "Discussion"}
+ {label: "Performance", value: "performance"},
+ {label: "Discussion", value: "discussion"}
];
$("#mulsel1").multiselect('dataprovider', data1);
- var data2 = [
- {label: "Lumière", value: "Lumière"},
- {label: "Son", value: "Son"},
- {label: "Musique", value: "Musique"},
- {label: "Décor", value: "Décor"}
- ];
- $("#mulsel2").multiselect('dataprovider', data2);
- var data3 = [
- {label: "PA", value: "Lumière"},
- {label: "PB", value: "Son"},
- {label: "PC", value: "Musique"},
- {label: "PD", value: "Décor"},
- {label: "PE", value: "Décor"}
- ];
- $("#mulsel3").multiselect('dataprovider', data3);
+
+ // Data 2 : modalités scéniques
+ $.ajax({
+ url: urlMS
+ })
+ .done(function( data ) {
+ var o = data.objects;
+ var n = data.objects.length;
+ var data2 = [];
+ for(var i=0;i<n;i++){
+ data2.push({label: o[i].name.substr(21), value: o[i].name})
+ }
+ $("#mulsel2").multiselect('dataprovider', data2);
+ $("#mulsel2").multiselect('enable');
+ });
+
+ // Data 3 : Personnages
+ $.ajax({
+ url: urlP
+ })
+ .done(function( data ) {
+ var o = data.objects;
+ var n = data.objects.length;
+ var data3 = [];
+ for(var i=0;i<n;i++){
+ data3.push({label: o[i].name.substr(12), value: o[i].name})
+ }
+ $("#mulsel3").multiselect('dataprovider', data3);
+ $("#mulsel3").multiselect('enable');
+ });
// Annotations :
$("#mulsel4").multiselect('disable');