add command to merge projects.
author ymh <ymh.work@gmail.com>
Mon, 19 Oct 2015 09:32:42 +0200
changeset 1458 fe2ec4cb6183
parent 1457 055ea84a196e
child 1459 f2b6284bf089
add command to merge projects.
.pylintrc
src/ldt/ldt/api/ldt/resources/content.py
src/ldt/ldt/ldt_utils/modelsutils.py
src/ldt/ldt/ldt_utils/projectserializer.py
src/ldt/ldt/management/commands/mergeprojects.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/.pylintrc	Mon Oct 19 09:32:42 2015 +0200
@@ -0,0 +1,383 @@
+[MASTER]
+
+# Specify a configuration file.
+#rcfile=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Profiled execution.
+profile=no
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=CVS
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=pylint_django
+
+
+# Use multiple processes to speed up Pylint.
+jobs=1
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loaded into the active Python interpreter and may
+# run arbitrary code.
+extension-pkg-whitelist=lxml
+
+# Allow optimization of some AST trees. This will activate a peephole AST
+# optimizer, which will apply various small optimizations. For instance, it can
+# be used to obtain the result of joining multiple strings with the addition
+# operator. Joining a lot of strings can lead to a maximum recursion error in
+# Pylint and this flag can prevent that. It has one side effect, the resulting
+# AST will be different than the one from reality.
+optimize-ast=no
+
+
+[REPORTS]
+
+# Set the output format. Available formats are text, parseable, colorized, msvs
+# (visual studio) and html. You can also give a reporter class, eg
+# mypackage.mymodule.MyReporterClass.
+output-format=text
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file name "pylint_global.[txt|html]".
+files-output=no
+
+# Tells whether to display a full report or only the messages
+reports=yes
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables errors, warning, statement, which
+# respectively contain the number of errors / warnings messages and the total
+# number of statements analyzed. This is used by the global evaluation report
+# (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Add a comment according to your evaluation note. This is used by the global
+# evaluation report (RP0004).
+comment=no
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details
+#msg-template=
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
+confidence=
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifiers separated by comma (,) or put this option
+# multiple times. See also the "--disable" option for examples.
+#enable=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once).You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use"--disable=all --enable=classes
+# --disable=W"
+disable=W1640,E1607,W1629,W1618,W1606,W1604,I0021,W1612,E1603,E1602,W1626,W1637,W1622,W1617,W1630,W1628,W1624,I0020,E1601,E1605,E1604,W1625,W1623,W1635,W1615,W1614,W1632,W1609,W1601,W1605,W1619,W1610,W1636,W1633,W1611,W1603,W1621,W1613,W0704,W1607,W1620,W1634,W1608,W1602,W1638,W1616,W1639,E1608,E1606,W1627,C0111,C0325,R0912,R0914
+
+
+[BASIC]
+
+# Required attributes for module, separated by a comma
+required-attributes=
+
+# List of builtins function names that should not be used, separated by a comma
+bad-functions=map,filter
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,Run,_
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Include a hint for the correct naming format with invalid-name
+include-naming-hint=no
+
+# Regular expression matching correct function names
+function-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for function names
+function-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct constant names
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Naming hint for constant names
+const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Regular expression matching correct inline iteration names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Naming hint for inline iteration names
+inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
+
+# Regular expression matching correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Naming hint for module names
+module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression matching correct argument names
+argument-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for argument names
+argument-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct method names
+method-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for method names
+method-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct variable names
+variable-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for variable names
+variable-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Naming hint for class names
+class-name-hint=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression matching correct class attribute names
+class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Naming hint for class attribute names
+class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Regular expression matching correct attribute names
+attr-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for attribute names
+attr-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=__.*__
+
+# Minimum line length for functions/classes that require docstrings, shorter
+# ones are exempt.
+docstring-min-length=-1
+
+
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=100
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+# List of optional constructs for which whitespace checking is disabled
+no-space-check=trailing-comma,dict-separator
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string='    '
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+[SIMILARITIES]
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+
+[SPELLING]
+
+# Spelling dictionary name. Available dictionaries: none. To make it work,
+# install the python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to indicated private dictionary in
+# --spelling-private-dict-file option instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis).
+ignored-modules=
+
+# List of classes names for which member attributes should not be checked
+# (useful for classes with attributes dynamically set).
+ignored-classes=SQLObject
+
+# When zope mode is activated, add a predefined set of Zope acquired attributes
+# to generated-members.
+zope=no
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E0201 when accessed. Python regular
+# expressions are accepted.
+generated-members=REQUEST,acl_users,aq_parent,objects
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=_$|dummy
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid defining new builtins when possible.
+additional-builtins=
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,_cb
+
+
+[CLASSES]
+
+# List of interface methods to ignore, separated by a comma. This is used for
+# instance to not check methods defined in Zope's Interface base class.
+ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,_fields,_replace,_source,_make
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Argument names that match this expression will be ignored. Default to name
+# with leading underscore
+ignored-argument-names=_.*
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of branch for function / method body
+max-branches=12
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=stringprep,optparse
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
--- a/src/ldt/ldt/api/ldt/resources/content.py	Wed Oct 07 17:41:22 2015 +0200
+++ b/src/ldt/ldt/api/ldt/resources/content.py	Mon Oct 19 09:32:42 2015 +0200
@@ -2,7 +2,8 @@
 from ldt.api.ldt.authentication import (SessionAuthentication, ApiKeyAuthentication)
 from ldt.indexation import get_results_list
 from ldt.ldt_utils.models import Content, Media, Project, Segment
-from ldt.ldt_utils.projectserializer import ProjectJsonSerializer, ProjectMerger
+from ldt.ldt_utils.projectserializer import ProjectJsonSerializer
+from ldt.ldt_utils.modelsutils import ProjectMerger
 from ldt.security import unprotect_models, protect_models
 import logging
 
@@ -18,25 +19,25 @@
 logger = logging.getLogger(__name__)
 
 class MediaResource(ModelResource):
-    
+
     class Meta:
         allowed_methods = ['get']
         resource_name = 'medias'
         queryset = Media.objects.all()
-    
+
     def get_resource_uri(self, bundle_or_obj=None, url_name='api_dispatch_list'):
         if not bundle_or_obj:
-            return super(MediaResource, self).get_resource_uri(bundle_or_obj,url_name)        
-        elif isinstance(bundle_or_obj, Bundle):   
+            return super(MediaResource, self).get_resource_uri(bundle_or_obj,url_name)
+        elif isinstance(bundle_or_obj, Bundle):
             return bundle_or_obj.obj.videopath + bundle_or_obj.obj.stream_src
         else:
             return bundle_or_obj.videopath + bundle_or_obj.stream_src
 
 class ContentResource(ModelResource):
-    
+
     front_project = fields.ForeignKey('ldt.api.ldt.resources.ProjectResource','front_project', null=True, full=False)
     media_url = fields.ForeignKey('ldt.api.ldt.resources.content.MediaResource','media_obj', null=True, full=False)
-    
+
     class Meta:
         allowed_methods = ['get']
         resource_name = 'contents'
@@ -47,7 +48,7 @@
             'title' : ALL,
         }
         ordering = ['title', 'creation_date', 'content_creation_date']
-    
+
     def get_object_list(self, request):
         return Content.safe_objects.select_related('front_project', 'media_obj').all()
 
@@ -70,17 +71,17 @@
         else:
             kwargs['iri_id'] = bundle_or_obj.iri_id
         return self._build_reverse_url("api_dispatch_detail", kwargs=kwargs)
-    
+
     def get_recommended(self, request, **kwargs):
         self.method_check(request, allowed=['get'])
-        
+
         keywords = request.GET.get('keywords','')
         keywords_search = " OR ".join(keywords.split(','))
         field = request.GET.get('field','all')
-        
+
         result_list = get_results_list(Segment, field, keywords_search)
         score_dict = dict([(k,sum([e.score for e in i])) for k,i in groupby(result_list, lambda e: e.iri_id)])
-        
+
         res = [self.full_dehydrate(self.build_bundle(obj=c, request=request)) for c in Content.safe_objects.filter(iri_id__in = score_dict.keys())]
 
         def add_score(b,s):
@@ -92,16 +93,16 @@
         }
 
         self.log_throttled_access(request)
-        
+
         return  self.create_response(request, object_list)
-    
+
     def get_all_projects(self, request, api_name, resource_name, iri_id=None):
         self.method_check(request, allowed=['get'])
         content = get_object_or_404(Content, iri_id=iri_id)
-        
+
         # Unprotect the time to build the project
         unprotect_models()
-        
+
         # add filter
         group_id = request.GET.get("group")
         if group_id is not None :
@@ -109,17 +110,16 @@
             projects = get_objects_for_group(group, "view_project", Project.objects.filter(contents__in=[content], state=2))
         else:
             projects = Project.objects.filter(contents__in=[content], state=2)
-        
+
         pm = ProjectMerger(content, projects)
         proj = pm.get_merged_project(False)
         ps = ProjectJsonSerializer(proj)
         data = ps.serialize_to_cinelab()
         self.log_throttled_access(request)
         # Delete project because it is useless to keep it in database
+        # TODO: remove this, it is ugly. The project object should not be created
         proj.delete()
-        
+
         protect_models()
-        
+
         return self.create_response(request, data)
-        
-            
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/ldt/ldt/ldt_utils/modelsutils.py	Mon Oct 19 09:32:42 2015 +0200
@@ -0,0 +1,99 @@
+from ldt.ldt_utils.models import Project
+
+from django.contrib.auth import get_user_model
+import lxml.etree
+
+import logging
+
+# pylint: disable=C0103
+logger = logging.getLogger(__name__)
+
+# pylint: disable=C0103
+User = get_user_model()
+
+"""
+Merge several projects in one for a given content. All ensembles are copied into one project
+"""
+# pylint: disable=R0903
+class ProjectMerger(object):
+
+    def __init__(self, content, projects):
+        self.content = content
+        self.projects = projects
+
+    # pylint: disable=R0914
+    def get_merged_project(self, shot_by_shot=True, only_visible=True):
+
+        # New project
+        contents = [self.content,]
+        # Get user
+        user = User.objects.get(username="admin")
+
+        proj = Project.create_project(title="Merged project",
+                                      user=user, contents=contents,
+                                      description=u"", set_icon=False)
+
+        doc = lxml.etree.fromstring(proj.ldt_encoded)
+        annot_node = doc.xpath("/iri/annotations")[0]
+        content_node = lxml.etree.SubElement(annot_node, 'content')
+        content_node.set('id', self.content.iri_id)
+        display_node = doc.xpath('/iri/displays/display')[0]
+        ctt_disp_node = display_node.xpath('content[@id="' + self.content.iri_id + '"]')[0]
+        # remove shot by shot from display
+        if not shot_by_shot:
+            dec_node = ctt_disp_node.xpath('decoupage[@id="de_PPP"]')
+            if len(dec_node) > 0:
+                dec_node = dec_node[0]
+                if dec_node is not None:
+                    ctt_disp_node.remove(dec_node)
+
+        # Parse all projects
+        for p in self.projects:
+            p_xml = lxml.etree.fromstring(p.ldt_encoded)
+            # We only keep the decoupages (cuttings) visible in the default
+            # view, which means the first display.
+            first_display = p_xml.xpath('/iri/displays/display')[0]
+            disp_node_list = first_display.xpath('content[@id="' + self.content.iri_id + '"]')
+            if len(disp_node_list) == 0:
+                # project display seems broken, skipping it
+                logger.info(
+                    "Get merged project: the display of project %s does not contain the content %s",
+                    p.ldt_id,
+                    self.content.iri_id)
+                continue
+            current_disp_node = disp_node_list[0]
+            # First version of ensemble
+            ens = p_xml.xpath(
+                '/iri/annotations/content[@id="' +
+                self.content.iri_id +
+                '"]/ensemble')
+            for e in ens:
+                content_node.append(e)
+                # Update display
+                for c in e.xpath('decoupage'):
+                    if not only_visible or \
+                        (only_visible and \
+                         len(current_disp_node.xpath('decoupage[@id="' + c.get('id') + '"]')) > 0):
+                        c_node = lxml.etree.SubElement(ctt_disp_node, 'decoupage')
+                        c_node.set(u'idens', e.get('id'))
+                        c_node.set(u'id', c.get('id'))
+            # Second version of ensemble
+            ens = p_xml.xpath('/iri/annotations/content[@id="' +
+                              self.content.iri_id +
+                              '"]/ensembles/ensemble')
+            for e in ens:
+                content_node.append(e)
+                # Update display
+                for c in e.xpath('decoupage'):
+                    if not only_visible or \
+                        (only_visible and \
+                         len(current_disp_node.xpath('decoupage[@id="' +
+                                                     c.get('id') +
+                                                     '"]')) > 0):
+                        c_node = lxml.etree.SubElement(ctt_disp_node, 'decoupage')
+                        c_node.set(u'idens', e.get('id'))
+                        c_node.set(u'id', c.get('id'))
+
+        proj.ldt = lxml.etree.tostring(doc, pretty_print=True)
+
+        return proj
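
For reference, a minimal sketch of how the relocated ProjectMerger can be driven directly, mirroring the calls in content.py above and in the mergeprojects command below. The iri_id value is a placeholder, and a configured Django environment is assumed:

    from ldt.ldt_utils.models import Content, Project
    from ldt.ldt_utils.modelsutils import ProjectMerger
    from ldt.ldt_utils.projectserializer import ProjectJsonSerializer

    content = Content.objects.get(iri_id="some_iri_id")   # placeholder id
    projects = Project.objects.filter(contents__in=[content], state=Project.PUBLISHED)

    merger = ProjectMerger(content, projects)
    proj = merger.get_merged_project(shot_by_shot=False)            # merged Project instance
    data = ProjectJsonSerializer(proj).serialize_to_cinelab()       # cinelab-compatible dict
    proj.delete()   # as in content.py, the merged project need not be kept in the database
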
--- a/src/ldt/ldt/ldt_utils/projectserializer.py	Wed Oct 07 17:41:22 2015 +0200
+++ b/src/ldt/ldt/ldt_utils/projectserializer.py	Mon Oct 19 09:32:42 2015 +0200
@@ -20,8 +20,8 @@
 """
 Serialize a project object to a cinelab compatible array
 """
-class ProjectJsonSerializer:
-    
+class ProjectJsonSerializer(object):
+
     def __init__(self, project, from_contents=True, from_display=True, first_cutting=None, only_one_cutting=False):
         self.project = project
         self.parsed = False
@@ -43,27 +43,27 @@
         self.only_one_cutting = only_one_cutting
         # if first_cutting, it means that we limit to the concerned media=
         self.one_content= False
-        
-        
+
+
     def __parse_views(self, display_node_list):
-        
+
         for display_node in display_node_list:
             display_id = display_node.get(u"id", None)
             if not display_id:
                 continue
             content_list = []
             cuttings_list = []
-            
+
             new_display = {
                 "id": display_id,
                 "contents": content_list,
                 "annotation_types": cuttings_list,
             }
-            
+
             for content_node in display_node.xpath("content"):
                 content_id = content_node.get("id")
                 if content_id not in content_list:
-                    content_list.append(content_id)                    
+                    content_list.append(content_id)
                 if content_id not in self.display_contents_list:
                     self.display_contents_list.append(content_id)
                 for cutting_node  in content_node.xpath("decoupage"):
@@ -75,40 +75,40 @@
                     ensemble_id = cutting_node.get("idens")
                     if ensemble_id not in self.display_ensemble_list:
                         self.display_ensemble_list.append(ensemble_id)
-            
-            
+
+
             # sets cutting to display in first position for the metadataplayer
             if self.first_cutting:
-                
+
                 annotation_types = new_display['annotation_types']
-                    
+
                 if self.first_cutting not in annotation_types:
                     annotation_types.append(self.first_cutting)
-                    
+
                 index = -1
                 for i, s in enumerate(annotation_types):
-                    if s == self.first_cutting: 
+                    if s == self.first_cutting:
                         index = i
                         break
-                        
-                annotation_types[0], annotation_types[index] = annotation_types[index], annotation_types[0]                        
-            
+
+                annotation_types[0], annotation_types[index] = annotation_types[index], annotation_types[0]
+
             if self.only_one_cutting:
-                new_display['annotation_types'] = [new_display['annotation_types'][0]] 
-            
+                new_display['annotation_types'] = [new_display['annotation_types'][0]]
+
             self.views_dict[display_id] = new_display
-        
-    
+
+
     def __parse_ensemble(self, ensemble_node, content, cutting_only=None):
-                
+
         ensemble_id = ensemble_node.attrib[u"id"]
-        
+
         ensemble_author = ensemble_node.attrib[u"author"]
         ensemble_title = ensemble_node.attrib[u"title"]
         ensemble_description = ensemble_node.attrib[u"abstract"]
         ensemble_created = datetime.utcnow().isoformat()
-        ensemble_modified = ensemble_created 
-        
+        ensemble_modified = ensemble_created
+
         list_items = []
         new_list = {
             "id" : ensemble_id,
@@ -124,16 +124,16 @@
                 "editable":"false"
             }
         }
-        
+
         if cutting_only:
             cuttings_list = cutting_only
         else:
-            cuttings_list = ensemble_node             
-                
-        for decoupage_node in cuttings_list:            
+            cuttings_list = ensemble_node
+
+        for decoupage_node in cuttings_list:
             if decoupage_node.tag != "decoupage" :
                 continue
-            
+
             decoupage_id = decoupage_node.attrib[ u"id"]
             if not cutting_only and self.from_display and decoupage_id not in self.display_cuttings_list:
                 continue
@@ -142,7 +142,7 @@
                 decoupage_creator = self.project.owner.username
             if not decoupage_creator:
                 decoupage_creator = "IRI"
-            
+
             decoupage_contributor = decoupage_creator
             date_str = decoupage_node.get(u"date")
             decoupage_created = None
@@ -152,21 +152,21 @@
                         decoupage_created = datetime.strptime(date_str, date_format).isoformat()
                         break
                     except Exception:
-                        decoupage_created = None   
+                        decoupage_created = None
             if decoupage_created is None:
                 decoupage_created = datetime.utcnow().isoformat()
             decoupage_modified = decoupage_created
-            
+
             decoupage_title = ""
-            for txtRes in decoupage_node.xpath("title/text()", smart_strings=False): 
-                    decoupage_title += txtRes
+            for txtRes in decoupage_node.xpath("title/text()", smart_strings=False):
+                decoupage_title += txtRes
 
             decoupage_description = ""
-            for txtRes in decoupage_node.xpath("abstract/text()", smart_strings=False): 
-                    decoupage_description += txtRes
+            for txtRes in decoupage_node.xpath("abstract/text()", smart_strings=False):
+                decoupage_description += txtRes
 
             list_items.append({"id-ref":decoupage_id})
-                        
+
             new_annotation_types = {
                 "id":decoupage_id,
                 "dc:creator":decoupage_creator,
@@ -176,13 +176,13 @@
                 "dc:title":decoupage_title,
                 "dc:description":decoupage_description
             }
-            
-            self.annotation_types_dict[decoupage_id] = new_annotation_types                
+
+            self.annotation_types_dict[decoupage_id] = new_annotation_types
             self.annotations_by_annotation_types[decoupage_id] = []
-                      
+
             res = decoupage_node.xpath("elements/element")
             for element_node in res:
-                
+
                 element_id = element_node.attrib[u"id"]
                 element_begin = element_node.attrib[u"begin"]
                 element_duration = element_node.attrib[u"dur"]
@@ -190,44 +190,44 @@
                 element_color = element_node.attrib.get(u"color", "")
                 element_ldt_src = element_node.attrib.get(u"src", "")
                 element_created = element_node.attrib["date"]
-                
-                element_title = reduce_text_node(element_node, "title/text()")        
-                element_description = reduce_text_node(element_node, "abstract/text()")                
-                
+
+                element_title = reduce_text_node(element_node, "title/text()")
+                element_description = reduce_text_node(element_node, "abstract/text()")
+
                 element_source_node_list = element_node.xpath("meta/source")
-                
+
                 if len(element_source_node_list) > 0:
                     element_source_node = element_source_node_list[0]
                     element_source = {"mimetype" :element_source_node.get(u'mimetype'), "url":element_source_node.get(u'url'), "content":reduce_text_node(element_source_node)}
                 else:
                     element_source = None
-                
+
                 element_audio_src = ""
                 element_audio_href = ""
                 res = element_node.xpath("audio")
                 if len(res) > 0:
                     element_audio_src = res[0].get(u"source", u"")
                     element_audio_href = res[0].text
-                
+
                 element_tags = []
-                
+
                 tags = element_node.get(u"tags", u"")
-                
-                tags_list = map(lambda s:s.strip(), tags.split(","))
 
-                #tags                                
+                tags_list = map(lambda s: s.strip(), tags.split(","))
+
+                #tags
                 if tags is None or len(tags) == 0:
                     tags_list = []
                     restagnode = element_node.xpath("tag/text()", smart_strings=False)
                     for tagnode in restagnode:
                         tags_list.append(tagnode)
-                        
+
                 if tags_list is None or len(tags_list) == 0:
                     tags_list = []
                     restagnode = element_node.xpath("tags/tag/text()", smart_strings=False)
                     for tagnode in restagnode:
                         tags_list.append(tagnode)
-                
+
                 tag_date = datetime.utcnow().isoformat()
                 for tag_title in tags_list:
                     if tag_title not in self.tags:
@@ -251,13 +251,13 @@
 
                 if not element_tags:
                     element_tags = None
-                
+
                 annot_creator = element_node.attrib[u"author"]
                 if annot_creator=="perso":
                     annot_creator = decoupage_creator
                 if not annot_creator:
                     annot_creator = decoupage_creator
-                
+
                 new_annotation = {
                     "begin": int(float(element_begin)),
                     "end": int(float(element_begin)) + int(float(element_duration)),
@@ -289,10 +289,10 @@
                         "dc:modified": decoupage_modified,
                     }
                 }
-                               
+
                 if element_source:
                     new_annotation['meta']['dc:source'] = element_source
-                
+
                 # Metadatacomposer features. An annotation can have the usual datas (title, description...)
                 # and new kinds of extra metas : video, audio, text, links array, images slideshow
                 # Get type
@@ -326,27 +326,27 @@
                         image_nodes = element_node.xpath("meta/images/image")
                         for image in image_nodes:
                             new_annotation["content"]["images"].append({"url": reduce_text_node(image, "url/text()"), "title":reduce_text_node(image, "title/text()"), "description":reduce_text_node(image, "description/text()")})
-                                
+
                 self.annotations_dict[element_id] = new_annotation
                 self.annotations_by_annotation_types[decoupage_id].append(new_annotation)
-        
+
         if not list_items:
             new_list["items"] = None
         self.lists_dict[ensemble_id] = new_list
-        
+
 
 
     def __parse_ldt(self):
-        
+
         self.ldt_doc = lxml.etree.fromstring(self.project.ldt_encoded)
-        
+
         if self.from_display:
             xpath_str = "/iri/displays/display[position()=1]"
             if isinstance(self.from_display, basestring):
                 xpath_str = "/iri/displays/display[@id='%s']" % self.from_display
-            
+
             self.__parse_views(self.ldt_doc.xpath(xpath_str))
-        
+
         # getting all contents at once
         # If self.one_content, we remove the other content
         if self.first_cutting and self.one_content:
@@ -359,13 +359,13 @@
                 set(self.ldt_doc.xpath("/iri/annotations/content/@id")) |
                 (set(self.ldt_doc.xpath('/iri/annotations/content[ensemble/decoupage/@id=\'%s\']/@id' % self.first_cutting)) if self.first_cutting and self.first_cutting not in self.display_cuttings_list else set())
             )
-        
+
         contents =  dict([ (c.iri_id, c) for c in Content.objects.filter(iri_id__in=contents_iri_id).select_related('media_obj', 'stat_annotation').prefetch_related("authors")])
         m_cls = ContentType.objects.get(model='media')
         m_cls = m_cls.model_class()
         medias = dict([ (m.id, m) for m in m_cls.safe_objects.filter(id__in = [c.media_obj.id for c in contents.values() if c.media_obj])])
-          
-        
+
+
         res = self.ldt_doc.xpath("/iri/medias/media")
         for mediaNode in res:
             iri_id = mediaNode.attrib[u"id"]
@@ -373,7 +373,7 @@
                 continue
             content = contents[iri_id]#Content.objects.get(iri_id=iri_id) #@UndefinedVariable
             self.__parse_content(content, medias)
-            
+
         res = self.ldt_doc.xpath("/iri/annotations/content")
         for content_node in res:
             content_id = content_node.attrib[u"id"]
@@ -387,7 +387,7 @@
                 if self.from_display and ensemble_id not in self.display_ensemble_list:
                     continue
                 self.__parse_ensemble(ensemble_node, content)
-                
+
         if self.first_cutting and self.first_cutting not in self.display_cuttings_list:
             cutting_node= self.ldt_doc.xpath('/iri/annotations/content/ensemble/decoupage[@id=\'%s\']' % self.first_cutting)[0]
             ensemble_node = cutting_node.xpath('..')[0]
@@ -395,8 +395,8 @@
             iri_id = content_node.get("id")
             content = contents[iri_id]#Content.objects.get(iri_id=iri_id)
             self.__parse_ensemble(ensemble_node, content, cutting_only=[cutting_node])
-            
-        
+
+
         #reorder annotations and annotation type from view
         if self.from_display and len(self.views_dict) > 0:
             new_annotation_types_dict = OrderedDict()
@@ -406,30 +406,30 @@
                     new_annotation_types_dict[annotation_type] = self.annotation_types_dict[annotation_type]
                     for annot in self.annotations_by_annotation_types[annotation_type]:
                         new_annotations_dict[annot['id']] = annot
-                    
+
             self.annotations_dict = new_annotations_dict
             self.annotation_types_dict = new_annotation_types_dict
-            
+
         # We add the first "bout a bout". It is an "edit" in ldt format and list/listtype="mashup" in json format
         self.__parse_edits()
-                               
+
         self.parsed = True
-        
-    
+
+
     def __parse_edits(self):
-        
+
         editings = self.ldt_doc.xpath("/iri/edits/editing")
         if not editings:
             return False
         editing = self.ldt_doc.xpath("/iri/edits/editing[position()=1]")[0]
-        e_id = editing.get("id");
+        e_id = editing.get("id")
         eList = editing.xpath("edit[position()=1]/eList")[0]
         d = datetime.utcnow().isoformat()
         e_title = ""
-        for txtRes in editing.xpath("title/text()", smart_strings=False): 
+        for txtRes in editing.xpath("title/text()", smart_strings=False):
             e_title += txtRes
         e_description = ""
-        for txtRes in editing.xpath("abstract/text()", smart_strings=False): 
+        for txtRes in editing.xpath("abstract/text()", smart_strings=False):
             e_description += txtRes
         list_items = []
         for item in eList:
@@ -452,40 +452,40 @@
                 }
             }
             self.lists_dict[e_id] = new_list
-    
+
     def __parse_content(self, content, medias):
-        
+
         doc = lxml.etree.parse(content.iri_file_path())
-        
+
         authors = content.authors.all()
-        
+
         if len(authors) > 0 :
             author = authors[0].handle
         else :
             author = "IRI"
-        
+
         if len(authors) > 1 :
             contributor = authors[1].handle
         else :
             contributor = author
-        
+
         content_author = ""
-        
+
         res = doc.xpath("/iri/head/meta[@name='author']/@content")
         if len(res) > 0:
             content_author = res[0]
-        
-        
+
+
         content_date = ""
-        
+
         res = doc.xpath("/iri/head/meta[@name='date']/@content")
         if len(res) > 0:
             content_date = res[0]
 
         url = ""
         meta_item_value = ""
-        
-        if content.media_obj and content.media_obj.id not in medias:   
+
+        if content.media_obj and content.media_obj.id not in medias:
             url = settings.FORBIDDEN_STREAM_URL
         elif content.videopath:
             url = content.videopath.rstrip('/') + "/" + content.src
@@ -494,90 +494,90 @@
             url = content.src
 
         new_media = {
-             "http://advene.liris.cnrs.fr/ns/frame_of_reference/ms" : "o=0",
-             "id" : content.iri_id,
-             "url" : url,
-             "unit" : "ms",
-             "origin" : "0",
-             "meta": {
-                 "dc:creator" : author,
-                 "dc:created" : content.creation_date.isoformat(),
-                 "dc:contributor" : contributor,
-                 "dc:modified" : content.update_date.isoformat(),
-                 "dc:creator.contents" : content_author,
-                 "dc:created.contents" : content_date,
-                 "dc:title" : content.title,
-                 "dc:description" : content.description,
-                 "dc:duration" : content.get_duration(),
-                 "item": {
-                     "name" : "streamer",
-                     "value": meta_item_value,
-                 },
-             }
+            "http://advene.liris.cnrs.fr/ns/frame_of_reference/ms" : "o=0",
+            "id" : content.iri_id,
+            "url" : url,
+            "unit" : "ms",
+            "origin" : "0",
+            "meta": {
+                "dc:creator" : author,
+                "dc:created" : content.creation_date.isoformat(),
+                "dc:contributor" : contributor,
+                "dc:modified" : content.update_date.isoformat(),
+                "dc:creator.contents" : content_author,
+                "dc:created.contents" : content_date,
+                "dc:title" : content.title,
+                "dc:description" : content.description,
+                "dc:duration" : content.get_duration(),
+                "item": {
+                    "name" : "streamer",
+                    "value": meta_item_value,
+                },
+            }
         }
-        
+
         self.medias_dict[content.iri_id] = new_media
-        
+
         new_display = {
             "id": "stat",
             "contents": [content.iri_id],
             "meta": {
-                     "stat": get_string_from_buckets(content.annotation_volume),
-                     }
+                "stat": get_string_from_buckets(content.annotation_volume),
+            }
         }
-        
+
         self.views_dict['test'] = new_display
-        
+
         if self.serialize_contents:
             res = doc.xpath("/iri/body/ensembles/ensemble")
             for ensemble_node in res:
                 self.__parse_ensemble(ensemble_node, content)
 
-    
+
     def serialize_to_cinelab(self, one_content_param=False):
-    
+
         res = {}
-        
+
         self.one_content = one_content_param
 
         if not self.parsed:
             self.__parse_ldt()
-        
+
         project_main_media = ""
         if len(self.medias_dict) > 0:
             project_main_media = self.medias_dict.iteritems().next()[1]["id"]
-        
+
         res['meta'] = {
-             'id': self.project.ldt_id,
-             'dc:created':self.project.creation_date.isoformat(),
-             'dc:modified':self.project.modification_date.isoformat(),
-             'dc:contributor':self.project.changed_by,
-             'dc:creator':self.project.created_by,
-             'dc:title':self.project.title,
-             'dc:description':self.project.get_description(self.ldt_doc), # get from doc, parse ldt
-             'main_media': {"id-ref":project_main_media}
-            }
-                
-                    
+            'id': self.project.ldt_id,
+            'dc:created':self.project.creation_date.isoformat(),
+            'dc:modified':self.project.modification_date.isoformat(),
+            'dc:contributor':self.project.changed_by,
+            'dc:creator':self.project.created_by,
+            'dc:title':self.project.title,
+            'dc:description':self.project.get_description(self.ldt_doc), # get from doc, parse ldt
+            'main_media': {"id-ref":project_main_media}
+        }
+
+
         res['medias'] = self.medias_dict.values() if len(self.medias_dict) > 0 else None
         res['lists'] = self.lists_dict.values() if len(self.lists_dict) > 0 else None
         res['tags'] = self.tags.values() if len(self.tags) > 0 else None
         res['views'] = self.views_dict.values() if len(self.views_dict) > 0 else None
-        
+
         res['annotation-types'] = self.annotation_types_dict.values() if len(self.annotation_types_dict) > 0 else None
         res['annotations'] = self.annotations_dict.values() if len(self.annotations_dict) > 0 else None
-        
+
         res['@context'] = { "dc": "http://purl.org/dc/elements/1.1/" }
-        
-        return res 
-    
+
+        return res
+
     def get_annotations(self, first_cutting=True):
-        
+
         if not self.parsed:
             self.__parse_ldt()
-        
+
         annotations = []
-        
+
         current_cutting = None
         uri = None
         for annot in self.annotations_dict.values():
@@ -596,7 +596,7 @@
             if content.media_obj and content.media_obj.external_publication_url:
                 uri = "%s#t=%d" % (content.media_obj.external_publication_url, begin)
 
-        
+
             annotations.append({
                 'begin': begin,
                 'duration':duration,
@@ -606,79 +606,79 @@
                 'id':annot['id'],
                 'uri':uri
             })
-            
+
         return annotations
 
 """
 Quick and dirty converter from cinelab JSON to ldt format.
 Does not support imports, multiple medias, or media creation
-"""   
+"""
 class JsonCinelab2Ldt:
-    
+
     def create_json(self, json):
-            
+
         medias = json['medias']
-        contentList = [] 
+        contentList = []
         for media in medias:
             c = Content.objects.get(iri_id=media['id'])
             if c != None:
                 contentList.append(c)
-            
-        meta = json['meta']            
+
+        meta = json['meta']
         creator = meta['creator']
         contributor = meta['contributor']
-            
+
         user = User.objects.get(username=creator)
         project = Project.create_project(user, creator + '_' + contributor, contentList)
         project.changed_by = contributor
-            
+
         ldtdoc = lxml.etree.fromstring(project.ldt_encoded)
         element = ldtdoc.xpath('/iri/annotations')
-            
+
         for media in contentList:
             content = lxml.etree.Element('content')
-            content.set('id', media.iri_id)              
-                      
+            content.set('id', media.iri_id)
+
         annotation_types = json['annotation_types']
         cuttings = {}
         if len(annotation_types) > 0:
             media = lxml.etree.SubElement(element[0], 'content')
             media.set('id', medias[0]['id'])
-                
+
             ens = lxml.etree.SubElement(media, 'ensemble')
             ens.set('title', 'Decoupages personnels')
             ens.set('idProject', project.ldt_id)
             ens.set('abstract', '')
             ens.set('id', 'g_' + str(uuid.uuid1()))
-            
+
             for i in annotation_types:
                 cutting_infos = {'desc' : i['meta']['description']}
-                
+
                 dec = lxml.etree.SubElement(ens, 'decoupage')
                 dec.set('author', contributor)
                 dec.set('id', 'c_' + str(uuid.uuid1()))
                 elements_list = lxml.etree.SubElement(dec, 'elements')
-                    
+
                 title = lxml.etree.SubElement(dec, 'title')
                 title.text = i['id']
-                
+
                 abstract = lxml.etree.SubElement(dec, 'abstract')
                 abstract.text = i['meta']['description']
-                
+
                 cutting_infos['xml_node'] = elements_list
-                cuttings[i['id']] = cutting_infos 
-                   
-                 
-        annotations = json['annotations']            
+                cuttings[i['id']] = cutting_infos
+
+
+        annotations = json['annotations']
         for i in annotations:
             cutting_infos = cuttings[i['type']]
             elements_node = cutting_infos['xml_node']
             element = lxml.etree.SubElement(elements_node, 'element')
-            
+
             element.set('begin', str(i['begin']))
             element.set('dur', str(i['end'] - i['begin']))
             element.set('id', 's_' + str(uuid.uuid1()))
-            
+
             title = lxml.etree.SubElement(element, 'title')
             audio = lxml.etree.SubElement(element, 'audio')
             audio.set('source', 'undefined')
@@ -688,82 +688,9 @@
             for tag in i['tags']:
                 tag_xml = lxml.etree.SubElement(tags, 'tag')
                 tag_xml.text = tag
-                    
-            
-        project.ldt = lxml.etree.tostring(ldtdoc, pretty_print=True)
-        project.save()
-        
-        return project.ldt
 
 
-"""
-Merge several projects in one for a given content. All ensembles are copied into one project
-"""
-class ProjectMerger:
-    
-    def __init__(self, content, projects):
-        self.content = content
-        self.projects = projects
-        
-    def get_merged_project(self, shot_by_shot=True, only_visible=True):
-        # New project
-        contents = [ self.content, ]
-        
-        # Get user
-        user = User.objects.get(username="admin")
-        
-        proj = Project.create_project(title="Merged project",
-                     user=user, contents=contents, 
-                     description=u"", set_icon=False)
-        
-        doc = lxml.etree.fromstring(proj.ldt_encoded)
-        annot_node = doc.xpath("/iri/annotations")[0]
-        content_node = lxml.etree.SubElement(annot_node, 'content')
-        content_node.set('id', self.content.iri_id)
-        display_node = doc.xpath('/iri/displays/display')[0]
-        ctt_disp_node = display_node.xpath('content[@id="' + self.content.iri_id + '"]')[0]
-        # remove shot by shot from display
-        if not shot_by_shot:
-            dec_node = ctt_disp_node.xpath('decoupage[@id="de_PPP"]')
-            if len(dec_node)>0:
-                dec_node = dec_node[0]
-                if dec_node is not None:
-                    ctt_disp_node.remove(dec_node)
-        
-        # Parse all projects
-        for p in self.projects:
-            p_xml = lxml.etree.fromstring(p.ldt_encoded)
-            # We only keep the decoupages (cuttings) visible in the default view, which means the first display.
-            first_display = p_xml.xpath('/iri/displays/display')[0]
-            disp_node_list = first_display.xpath('content[@id="' + self.content.iri_id + '"]')
-            if len(disp_node_list) == 0:
-                # project seems broken passing
-                logger.info("Get merged project : this project display %s does not contains the content %s", p.ldt_id, self.content.iri_id)
-                continue
-            current_disp_node = disp_node_list[0]
-            # First version of ensemble
-            ens = p_xml.xpath('/iri/annotations/content[@id="' + self.content.iri_id + '"]/ensemble')
-            for e in ens:
-                content_node.append(e)
-                # Update display
-                for c in e.xpath('decoupage'):
-                    if not only_visible or (only_visible and len(current_disp_node.xpath('decoupage[@id="' + c.get('id') + '"]'))>0 ) :
-                        c_node = lxml.etree.SubElement(ctt_disp_node, 'decoupage')
-                        c_node.set(u'idens', e.get('id'))
-                        c_node.set(u'id', c.get('id'))
-            # Second version of ensemble
-            ens = p_xml.xpath('/iri/annotations/content[@id="' + self.content.iri_id + '"]/ensembles/ensemble')
-            for e in ens:
-                content_node.append(e)
-                # Update display
-                for c in e.xpath('decoupage'):
-                    if not only_visible or (only_visible and len(current_disp_node.xpath('decoupage[@id="' + c.get('id') + '"]'))>0 ) :
-                        c_node = lxml.etree.SubElement(ctt_disp_node, 'decoupage')
-                        c_node.set(u'idens', e.get('id'))
-                        c_node.set(u'id', c.get('id'))
-        
-        proj.ldt = lxml.etree.tostring(doc, pretty_print=True)
-        
-        return proj
-        
-        
+        project.ldt = lxml.etree.tostring(ldtdoc, pretty_print=True)
+        project.save()
+
+        return project.ldt
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/ldt/ldt/management/commands/mergeprojects.py	Mon Oct 19 09:32:42 2015 +0200
@@ -0,0 +1,154 @@
+import itertools
+
+from django.contrib.auth import get_user_model
+from django.core.management.base import NoArgsCommand, CommandError
+from django.db.models import Q
+
+from ldt.ldt_utils.models import Project, Content
+from ldt.ldt_utils.modelsutils import ProjectMerger
+
+
+class Command(NoArgsCommand):
+    """
+    Merge projects.
+    Beware: this version of the command only correctly supports merging projects that refer to a single content.
+    Usage:
+        -p <project_id>: project id; may be given more than once
+        -c <content_id>: content id (only a single content is supported)
+        -t <title>: project title (default: "Merged: <titles of the merged projects>")
+        -u <user>: user owning the merged project (default: the first superuser)
+        --only-visible: merge only the cuttings visible in the default display (default: false)
+        --exclude-shot-by-shot: remove the shot-by-shot cutting (default: false)
+        --all-published: merge all projects, published or not (default: false)
+    """
+    help = 'Merge projects'
+
+    def add_arguments(self, parser):
+
+        # Named (optional) arguments
+        parser.add_argument(
+            '--only-visible',
+            action='store_true',
+            dest='visible',
+            default=False,
+            help='merge only visible cuttings')
+
+        parser.add_argument(
+            '--exclude-shot-by-shot',
+            action='store_true',
+            dest='exclude_shot_by_shot',
+            default=False,
+            help='exclude shot by shot cuttings')
+
+        parser.add_argument(
+            '--all-published',
+            action='store_true',
+            dest='all_published',
+            default=False,
+            help='merge all projects published or not')
+
+        parser.add_argument(
+            '-p', '--project',
+            action='append',
+            dest='projects',
+            metavar='PROJECT_ID',
+            default=[],
+            help='project id to concatenate')
+
+        parser.add_argument(
+            '-c', '--content',
+            action='store',
+            dest='contents',
+            default=None,
+            metavar='CONTENT_ID',
+            help='content id to concatenate')
+
+        parser.add_argument(
+            '-t', '--title',
+            action='store',
+            dest='title',
+            metavar='TITLE',
+            help='The title for the merged project (default: "Merged: title1, title2, ...")'
+        )
+
+        parser.add_argument(
+            '-u', '--user',
+            action='store',
+            dest='user',
+            metavar='USER',
+            help='The user set as creator and owner of the merged project (default: the first superuser)'
+        )
+
+
+    def handle_noargs(self, **options):
+
+        # pylint: disable=C0103
+        User = get_user_model()
+
+        username = options.get('user', None)
+
+        if not username:
+            users = User.objects.filter(is_superuser=True).order_by('date_joined')
+            if len(users) > 0:
+                user = users[0]
+                username = user.username
+            else:
+                raise CommandError("No username given and can not fond a superuser")
+        else:
+            user = User.objects.get(username=username)
+
+        # filter projects
+        project_filter = Q()
+
+        projects_ids = options.get('projects', None)
+        if projects_ids:
+            project_filter &= Q(ldt_id__in=projects_ids)
+
+        content_id = options.get('contents', None)
+        if content_id:
+            project_filter = Q(contents__iri_id__in=[content_id,])
+
+        if not projects_ids and not content_id:
+            raise CommandError("At least one content or project must be specified")
+
+        if not options.get('all_published', False):
+            project_filter &= Q(state=Project.PUBLISHED)
+
+        projects = Project.objects.filter(project_filter)
+
+        if len(projects) == 0:
+            raise CommandError("No project found, aborting")
+
+
+        title = options.get('title', "")
+        if not title:
+            title = "Merged: " + ", ".join([p.title.strip() for p in projects])
+
+        contents = set(itertools.chain(*[proj.contents.all() for proj in projects]))
+        if len(contents) == 0:
+            raise CommandError("Content not found")
+        if len(contents) > 1 and not content_id:
+            raise CommandError("Too many contents %d" % len(contents))
+
+        if content_id:
+            content = Content.objects.get(iri_id=content_id)
+        else:
+            content = next(iter(contents))  # contents is a set, so take its single element
+
+        if options.get('verbosity', 1) > 0:
+            print("Merging %d projects in \'%s\'" % (len(projects), title))
+
+        pmerger = ProjectMerger(content, projects)
+
+        proj = pmerger.get_merged_project(
+            only_visible=not options.get('visible', False),
+            shot_by_shot=not options.get('exclude_shot_by_shot', False))
+
+        proj.created_by = username
+        proj.changed_by = username
+        proj.owner = user
+        proj.title = title
+        proj.save()
+
+        if options.get('verbosity', 1) > 0:
+            print("Project \'%s\' created" % (title))