remove command not meant to be here and import modifs from maintenance head.
author: cavaliet
Mon, 08 Apr 2013 17:24:42 +0200
changeset 1142 6c61660e51a2
parent 1135 d294c6f552dc
child 1152 351782e601e7
remove command not meant to be here and import modifs from maintenance head.
.settings/org.eclipse.core.resources.prefs
src/ldt/ldt/ldt_utils/templates/ldt/ldt_utils/partial/embed_player.html
src/ldt/ldt/ldt_utils/views/content.py
src/ldt/ldt/management/commands/loadandadddata.py
src/ldt/ldt/static/ldt/swf/ldt/LignesDeTempsFlex.swf
--- a/.settings/org.eclipse.core.resources.prefs	Tue Apr 02 12:27:40 2013 +0200
+++ b/.settings/org.eclipse.core.resources.prefs	Mon Apr 08 17:24:42 2013 +0200
@@ -1,4 +1,4 @@
-#Fri Mar 22 12:25:32 CET 2013
+#Mon Apr 08 17:07:34 CEST 2013
 eclipse.preferences.version=1
 encoding//src/ldt/ldt/core/migrations/0001_initial.py=utf-8
 encoding//src/ldt/ldt/core/migrations/0002_auto__del_owner.py=utf-8
@@ -37,7 +37,6 @@
 encoding//src/ldt/ldt/ldt_utils/migrations/0025_chg_site_domain.py=utf-8
 encoding//src/ldt/ldt/ldt_utils/migrations/0026_set_relative_ldtproject.py=utf-8
 encoding//src/ldt/ldt/ldt_utils/views/json.py=utf-8
-encoding//src/ldt/ldt/management/commands/loadandadddata.py=utf-8
 encoding//src/ldt/ldt/management/commands/synciri.py=utf-8
 encoding//src/ldt/ldt/management/utils.py=utf-8
 encoding//src/ldt/ldt/test/test_runner.py=utf-8
--- a/src/ldt/ldt/ldt_utils/templates/ldt/ldt_utils/partial/embed_player.html	Tue Apr 02 12:27:40 2013 +0200
+++ b/src/ldt/ldt/ldt_utils/templates/ldt/ldt_utils/partial/embed_player.html	Mon Apr 08 17:24:42 2013 +0200
@@ -39,15 +39,6 @@
             type: "AutoPlayer",
     {% if external_url %}
             video: "{{ external_url }}",
-    {% else %}
-            streamer: function(_url) {
-                var _matches = _url.match(/^[^\/]+\/\/[^\/]+\/[^\/]+\//);
-                if (_matches) {
-                    return _matches[0];
-                } else {
-                    return _url;
-                }
-            },
     {% endif %}
             height: 300,
             autostart: true
--- a/src/ldt/ldt/ldt_utils/views/content.py	Tue Apr 02 12:27:40 2013 +0200
+++ b/src/ldt/ldt/ldt_utils/views/content.py	Mon Apr 08 17:24:42 2013 +0200
@@ -44,9 +44,27 @@
         del cleaned_data["media_file"]
         if not cleaned_data['videopath']:
             cleaned_data['videopath'] = settings.STREAM_URL
-        # if the source is already http:// or rtmp:// we don't have to add STREAM_URL
-        if cleaned_data['src'].startswith("rtmp://") or cleaned_data['src'].startswith("http://") or cleaned_data['src'].startswith("https://"):
-            cleaned_data['videopath'] = ''    
+        # if the source is already http:// we don't have to add STREAM_URL
+        if cleaned_data['src'].startswith("http://") or cleaned_data['src'].startswith("https://"):
+            cleaned_data['videopath'] = ''
+        # if the source is rtmp:// we parse the url to add a correct rtmp provider as videopath
+        elif cleaned_data['src'].startswith("rtmp://"):
+            cleaned_data['videopath'] = ''
+            # If the url is of the form rtmp://site/flv:path/to/file.flv or rtmp://site/mp4:path/to/file.mp4, we parse it.
+            # If the url is of the form rtmp://site/path/to/file, we don't parse it because we can't get the right streamer.
+            a = cleaned_data['src'].split(":")
+            if len(a)==3 and (a[1].endswith("flv") or a[1].endswith("mp4") or a[1].endswith("mp3")):
+                # We update with the correct streamer
+                cleaned_data['videopath'] = a[0] + ":" + a[1][:-3]
+                # We remove the "flv:" from the url because it's useless in the real url
+                if a[1].endswith("flv"):
+                    cleaned_data['src'] = a[2]
+                else:
+                    cleaned_data['src'] = a[1][-3:] + ":" + a[2]
+                # We remove the ".mp3" at the end of the src if necessary
+                if a[1].endswith("mp3") and cleaned_data['src'].endswith(".mp3"):
+                    cleaned_data['src'] = cleaned_data['src'][:-4]
+        # We get or create the media with the correct data
         media, created = Media.objects.get_or_create(src=cleaned_data['src'], defaults=cleaned_data) #@UndefinedVariable
     
     elif media_input_type == "url" or media_input_type == "upload" :
--- a/src/ldt/ldt/management/commands/loadandadddata.py	Tue Apr 02 12:27:40 2013 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,259 +0,0 @@
-# -*- coding: utf-8 -*-
-'''
-Created on Mar 22, 2013
-
-@author: tc
-'''
-
-from ..utils import show_progress
-from django.contrib.auth.models import User, Group
-from django.core.management import call_command
-from django.core.management.base import BaseCommand, CommandError
-from ldt.ldt_utils.models import Content, Project
-from optparse import make_option
-import os.path
-import json
-from ldt.security.cache import cached_assign
-
-
-class Command(BaseCommand):
-    '''
-    Load users, medias, contents, project and guardian permissions from json file generated by dumpdata
-    '''
-
-    args = 'json_file'
-    help = 'Load users, medias, contents, project and guardian permissions from json file generated by dumpdata'
-    
-    option_list = BaseCommand.option_list + (
-        make_option('-u', '--ignore-users',
-            dest= 'ignore_users',
-            default= None,
-            help= 'list of usernames to ignore (separated by comma)'
-        ),
-        make_option('-g', '--ignore-groups',
-            dest= 'ignore_groups',
-            default= None,
-            help= 'list of group names to ignore (separated by comma)'
-        ),
-        make_option('-c', '--ignore-contents',
-            dest= 'ignore_contents',
-            default= None,
-            help= 'list of content iri_id to ignore (separated by comma)'
-        ),
-        make_option('-n', '--new-group',
-            dest= 'new_group',
-            default= None,
-            help= 'The script will create a new group and assign view permission for all new media/contents to all the new users'
-        ),
-    )
-    
-    
-    def __safe_get(self, dict_arg, key, conv = lambda x: x, default= None):
-        val = dict_arg.get(key, default)
-        return conv(val) if val else default
-
-    def __safe_decode(self, s):
-        if not isinstance(s, basestring):
-            return s
-        try:
-            return s.decode('utf8')
-        except:
-            try:
-                return s.decode('latin1')
-            except:
-                return s.decode('utf8','replace')
-
-    def handle(self, *args, **options):
-        
-        # Test path
-        if len(args) != 1:
-            raise CommandError("The command has no argument or too much arguments. Only one is needed : the json file path.")
-        
-        # Check if temporary files already exist
-        path = os.path.abspath(args[0])
-        dir = os.path.dirname(path)
-        path_file1 = os.path.join(dir, 'temp_data1.json')
-        path_file2 = os.path.join(dir, 'temp_data2.json')
-        if os.path.exists(path_file1) or os.path.exists(path_file2):
-            confirm = raw_input(("""
-    The folder %s contains the files temp_data1.json or temp_data2.json. These files will be overwritten.
-    
-    Do you want to continue ?
-
-    Type 'y' to continue, or 'n' to quit: """) % dir)
-            do_import = (confirm == "y")
-        
-        # Continue
-        if do_import:
-            # Init ignore list
-            user_ignore_list = ["admin","AnonymousUser"]
-            group_ignore_list = ["everyone"]
-            content_ignore_list = []
-            
-            # Update ignore list
-            ignore_users = options.get('ignore_users', None)
-            ignore_groups = options.get('ignore_groups', None)
-            ignore_contents = options.get('ignore_contents', None)
-            if ignore_users:
-                for u in ignore_users.split(","):
-                    user_ignore_list.append(u)
-            if ignore_groups:
-                for g in ignore_groups.split(","):
-                    group_ignore_list.append(g)
-            if ignore_contents:
-                for c in ignore_contents.split(","):
-                    content_ignore_list.append(c)
-            
-            # Begin work...
-            print("Opening file...")
-            json_file = open(path,'rb')
-            print("Loading datas...")
-            data = json.load(json_file)
-            print("%d objects found..." % len(data))
-            content_pk_id = {}
-            project_pk_id = {}
-            # datas for file 1 : users, medias, contents, projects 
-            data_file1 = []
-            # datas for file 2 : guardian permissions
-            data_file2 = []
-            # users
-            usernames = []
-            for obj in data:
-                if "model" in obj:
-                    m = obj["model"]
-                    if m!="guardian.userobjectpermission" and m!="guardian.groupobjectpermission":
-                        # We remove user admin, user AnonymousUser, group everyone and users and contents in ignore list
-                        # (a bit fuzzy for media and src but good for others)
-                        if not ((m=="auth.user" and "username" in obj["fields"] and obj["fields"]["username"] in user_ignore_list) or \
-                                (m=="auth.group" and "name" in obj["fields"] and obj["fields"]["name"] in group_ignore_list) or \
-                                (m=="ldt_utils.media" and "src" in obj["fields"] and any(s in obj["fields"]["src"] for s in content_ignore_list)) or \
-                                (m=="ldt_utils.content" and "iri_id" in obj["fields"] and obj["fields"]["iri_id"] in content_ignore_list)):
-                            data_file1.append(obj)
-                        #else:
-                        #   print("I don't keep from datas %s, pk = %s" % (m, obj["pk"]))
-                        if "pk" in obj:
-                            # For both contents and projects, we save 2 dicts [id]=pk and [pk]=id
-                            # It will enable to parse and replace easily the old pk by the new ones in the permission datas
-                            if m=="ldt_utils.project":
-                                pk = str(obj["pk"])
-                                id = obj["fields"]["ldt_id"]
-                                project_pk_id[pk] = id
-                            elif m=="ldt_utils.content":
-                                pk = str(obj["pk"])
-                                id = obj["fields"]["iri_id"]
-                                content_pk_id[pk] = id
-                            obj["pk"] = None
-                    else:
-                        obj["pk"] = None
-                        data_file2.append(obj)
-                    # Save usernames except AnonymousUser 
-                    if m=="auth.user" and "username" in obj["fields"] and obj["fields"]["username"]!="AnonymousUser":
-                        usernames.append(obj["fields"]["username"])
-            json_file.close()
-            #data_file1.append(project_pk_id)
-            #data_file1.append(project_id_pk)
-            #data_file1.append(content_pk_id)
-            #data_file1.append(content_id_pk)
-            
-            # We save the datas in a file in order to simply call loaddata
-            print("Writing %s..." % path_file1)
-            file1 =  open(path_file1, 'w')
-            json.dump(data_file1, file1, indent=2)
-            file1.close()
-            print("Updating permissions ids...")
-            # We replace the old pk by the natural keys in the permission datas
-            ignored_project_pks = []
-            ignored_content_pks = []
-            perm_data = []
-            for obj in data_file2:
-                type = obj["fields"]["content_type"][1]
-                old_pk = obj["fields"]["object_pk"]
-                if type=="project":
-                    try:
-                        obj["fields"]["object_pk"] = project_pk_id[old_pk]
-                    except:
-                        # The dumpdata can contain permissions for removed projects
-                        ignored_project_pks.append(old_pk)
-                        continue
-                    # Keeping only valuables objs avoids errors when we we get the new pks
-                    perm_data.append(obj)
-                elif type == "content":
-                    try:
-                        obj["fields"]["object_pk"] = content_pk_id[old_pk]
-                    except:
-                        # The dumpdata can contain permissions for removed contents
-                        ignored_content_pks.append(old_pk)
-                        continue
-                    # Keeping only valuables objs avoids errors when we we get the new pks
-                    perm_data.append(obj)
-            # We inform the user
-            print("%d project permissions were ignored because projects do not exist in the current datas." % len(ignored_project_pks))
-            print("%d content permissions were ignored because contents do not exist in the current datas." % len(ignored_content_pks))
-            print("Loading datas from temporary file %s ..." % path_file1)
-            # Loaddata from file 1
-            call_command("loaddata", path_file1)
-            
-            # Now users, medias, contents, projects have been saved.
-            # We can get the new pk for contents and projects
-            # Careful: in Python 3, dict.copy().values() will be prefered to list(dict.values())
-            # We use select_related("media_obj") because it will usefull with the new group
-            contents = Content.objects.filter(iri_id__in=list(content_pk_id.values())).select_related("media_obj")#.values('pk', 'iri_id')
-            content_id_pk = {}
-            for c in contents:
-                content_id_pk[c.iri_id] = str(c.pk)
-            projects = Project.objects.filter(ldt_id__in=list(project_pk_id.values())).values('pk', 'ldt_id')
-            project_id_pk = {}
-            for p in projects:
-                project_id_pk[p["ldt_id"]] = str(p["pk"])
-            
-            # Now we reparse the perm_data and update with the new pks
-            for obj in perm_data:
-                type = obj["fields"]["content_type"][1]
-                obj_id = obj["fields"]["object_pk"]
-                if type=="project":
-                    obj["fields"]["object_pk"] = project_id_pk[obj_id]
-                elif type == "content":
-                    obj["fields"]["object_pk"] = content_id_pk[obj_id]
-            
-            
-            # We save the datas in a file in order to simply call loaddata
-            print("Writing %s..." % path_file2)
-            file2 =  open(path_file2, 'w')
-            json.dump(perm_data, file2, indent=2)
-            file2.close()
-            print("Loading permissions from temporary file %s ..." % path_file2)
-            call_command("loaddata", path_file2)
-            
-            # Remove temp files
-            print("Removing temporary files...")
-            try:
-                os.remove(path_file1)
-            except:
-                print("Removing temporary files %s failed" % path_file1)
-            try:
-                os.remove(path_file2)
-            except:
-                print("Removing temporary files %s failed" % path_file2)
-            
-            # Now that all datas have been imported we can create the new group and assign permissions if asked
-            new_group = options.get('new_group', None)
-            if new_group and len(usernames)>0:
-                print("Set view permissions for the new group %s ..." % new_group)
-                # Get or create group
-                new_grp, _ = Group.objects.get_or_create(name=new_group)
-                # Add users to the group
-                users = User.objects.filter(username__in=usernames)
-                for u in users:
-                    new_grp.user_set.add(u)
-                # Get all contents and medias
-                for c in contents:
-                    cached_assign('view_content', new_grp, c)
-                    cached_assign('view_media', new_grp, c.media_obj)
-                
-            print("Indexing imported projects ...")
-            call_command('reindex', projects=True, no_content=True)
-        
-        # This is the end
-        print("This is the end")
-        
-        
\ No newline at end of file
Binary file src/ldt/ldt/static/ldt/swf/ldt/LignesDeTempsFlex.swf has changed