--- a/.settings/org.eclipse.core.resources.prefs Sun Feb 10 23:04:28 2013 +0100
+++ b/.settings/org.eclipse.core.resources.prefs Wed Feb 27 00:07:54 2013 +0100
@@ -6,6 +6,7 @@
encoding//src/ldt/ldt/indexation/models.py=utf-8
encoding//src/ldt/ldt/indexation/query_parser.py=utf-8
encoding//src/ldt/ldt/indexation/search_indexes.py=utf-8
+encoding//src/ldt/ldt/indexation/signals.py=utf-8
encoding//src/ldt/ldt/indexation/tests.py=utf-8
encoding//src/ldt/ldt/ldt_utils/events.py=utf-8
encoding//src/ldt/ldt/ldt_utils/migrations/0001_initial.py=utf-8
--- a/src/ldt/ldt/indexation/__init__.py Sun Feb 10 23:04:28 2013 +0100
+++ b/src/ldt/ldt/indexation/__init__.py Wed Feb 27 00:07:54 2013 +0100
@@ -1,5 +1,7 @@
+from .backends import elasticsearch_backend as ldt_elasticsearch_backend
from django.conf import settings
-
+from haystack import connections
+from haystack.constants import DEFAULT_ALIAS
from haystack.query import SearchQuerySet
from ldt.indexation.highlighter import LdtHighlighter as Highlighter
from ldt.indexation.query_parser import QueryParser
@@ -7,6 +9,7 @@
from ldt.text.models import Annotation
import re
import sys
+
def get_results_with_context(field, query, content_list=None, highlight=True):
@@ -82,6 +85,62 @@
return results_list
+def object_delete(model, **kwargs):
+
+
+ kwargs_filter = kwargs.copy()
+ kwargs_filter.pop('using', None)
+
+    # Here we do a poor man's transaction management.
+    # There is no clear transaction management in Haystack.
+    # Therefore, we give priority to the database and delete there first.
+    # If there is an error there, the index will not be updated.
+
+ objs = list(model.objects.filter(**kwargs_filter))
+
+ model.objects.filter(**kwargs_filter).delete()
+
+ using = None
+ if 'using' in kwargs:
+ using = kwargs.get('using', None)
+ if not using:
+ using = DEFAULT_ALIAS
+
+ conn = connections[using]
+
+ if isinstance(conn, ldt_elasticsearch_backend.ElasticsearchSearchEngine):
+ conn.get_backend().remove(objs, commit=True)
+ else:
+ for o in objs:
+ conn.get_backend().remove(o, commit=True)
+
+def object_insert(model, object_list, func_key, using = None):
+
+ if not object_list:
+ return
+
+ model.objects.bulk_create(object_list)
+
+ obj_dict = dict(model.objects.filter(**{func_key+'__in':[getattr(o, func_key) for o in object_list]}).values_list(func_key,"id"))
+
+ for o in object_list:
+ o.id = obj_dict[getattr(o,func_key)]
+
+ if not using:
+ using = DEFAULT_ALIAS
+
+ conn = connections[using]
+
+ backend = conn.get_backend()
+ unified_index = conn.get_unified_index()
+
+ index = unified_index.get_index(model)
+
+ backend.update(index, object_list)
+
+
+
+
class SimpleSearch(object):
def query(self, field, query):
--- a/src/ldt/ldt/indexation/backends/elasticsearch_backend.py Sun Feb 10 23:04:28 2013 +0100
+++ b/src/ldt/ldt/indexation/backends/elasticsearch_backend.py Wed Feb 27 00:07:54 2013 +0100
@@ -7,8 +7,21 @@
from django.db.models.loading import get_model
from haystack.backends import BaseEngine, SearchResult, elasticsearch_backend
from haystack.constants import DJANGO_CT, DJANGO_ID
+from haystack.exceptions import MissingDependency
+from haystack.utils import get_identifier
from ldt.ldt_utils.models import Segment
+import collections
import datetime
+try:
+ import requests
+except ImportError:
+ raise MissingDependency("The 'elasticsearch' backend requires the installation of 'requests'.")
+try:
+ import pyelasticsearch
+except ImportError:
+ raise MissingDependency("The 'elasticsearch' backend requires the installation of 'pyelasticsearch'. Please refer to the documentation.")
+
+
class ElasticsearchSearchBackend(elasticsearch_backend.ElasticsearchSearchBackend):
@@ -110,6 +123,27 @@
'facets': facets,
'spelling_suggestion': spelling_suggestion,
}
+
+ def remove(self, obj_or_string, commit=True):
+
+ if not self.setup_complete:
+ try:
+ self.setup()
+ except (requests.RequestException, pyelasticsearch.ElasticHttpError), e:
+ if not self.silently_fail:
+ raise
+
+ self.log.error("Failed to remove document '%s' from Elasticsearch: %s", repr(obj_or_string), e)
+ return
+
+ if isinstance(obj_or_string, collections.Iterable) and not isinstance(obj_or_string, basestring):
+ ids = [get_identifier(elt) for elt in obj_or_string]
+ if not ids:
+ return
+ q = {'ids' : {'values' : ids}}
+ self.conn.delete_by_query(self.index_name, 'modelresult', q)
+ else:
+ return super(ElasticsearchSearchBackend, self).remove(obj_or_string, commit=commit)
class ElasticsearchSearchEngine(BaseEngine):
--- a/src/ldt/ldt/indexation/query_parser.py Sun Feb 10 23:04:28 2013 +0100
+++ b/src/ldt/ldt/indexation/query_parser.py Wed Feb 27 00:07:54 2013 +0100
@@ -7,10 +7,13 @@
#TODO: unitest for
-from whoosh.qparser import SimpleParser, FieldsPlugin, OperatorsPlugin, PhrasePlugin, SingleQuotePlugin, GroupPlugin, PrefixPlugin, GtLtPlugin, RangePlugin
-from whoosh.query import Term, And, AndMaybe, Or, AndNot, Not, Phrase, Prefix, TermRange
+from django.conf import settings
from haystack.query import SQ
-from django.conf import settings
+from whoosh.qparser import (SimpleParser, FieldsPlugin, OperatorsPlugin,
+ PhrasePlugin, SingleQuotePlugin, GroupPlugin, PrefixPlugin, GtLtPlugin,
+ RangePlugin)
+from whoosh.query import (Term, And, AndMaybe, Or, AndNot, Not, Phrase, Prefix,
+ TermRange)
HAYSTACK_DEFAULT_OPERATOR = getattr(settings,'HAYSTACK_DEFAULT_OPERATOR','AND')
@@ -158,16 +161,16 @@
current_node, current_connector = self.current_node_stack[-1]
current_node.add(new_node, current_connector)
- def __convert_nb(self, str):
+ def __convert_nb(self, str_nb):
try:
- res = int(str)
+ res = int(str_nb)
return res
except ValueError:
try:
- res = float(str)
+ res = float(str_nb)
return res
except ValueError:
- return str
+ return str_nb
\ No newline at end of file
--- a/src/ldt/ldt/indexation/search_indexes.py Sun Feb 10 23:04:28 2013 +0100
+++ b/src/ldt/ldt/indexation/search_indexes.py Wed Feb 27 00:07:54 2013 +0100
@@ -6,10 +6,10 @@
'''
from haystack import indexes
-from ldt.ldt_utils.models import Segment
+from ldt.ldt_utils.models import Segment
from ldt.text.models import Annotation
-class SegmentIndex(indexes.RealTimeSearchIndex, indexes.Indexable):
+class SegmentIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
iri_id = indexes.CharField(model_attr='iri_id', indexed=False, stored=True)
project_id = indexes.CharField(model_attr='project_id', indexed=False, stored=True, null=True)
@@ -27,7 +27,7 @@
def get_model(self):
return Segment
-class AnnotationIndex(indexes.RealTimeSearchIndex, indexes.Indexable):
+class AnnotationIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
tags = indexes.CharField(model_attr='tags', indexed=True, stored=False)
title = indexes.CharField(model_attr='title', indexed=True, stored=True)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/ldt/ldt/indexation/signals.py Wed Feb 27 00:07:54 2013 +0100
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+'''
+Created on Feb 22, 2013
+
+@author: ymh
+'''
+from django.db import models
+from haystack import signals
+from ldt.ldt_utils.models import Segment
+from ldt.text.models import Annotation
+
+class LdtSignalProcessor(signals.BaseSignalProcessor):
+
+ def __connect_signals(self, klass):
+ models.signals.post_save.connect(self.handle_save, sender=klass)
+ models.signals.post_delete.connect(self.handle_delete, sender=klass)
+
+ def __disconnect_signals(self, klass):
+ models.signals.post_save.disconnect(self.handle_save, sender=klass)
+ models.signals.post_delete.disconnect(self.handle_delete, sender=klass)
+
+
+ def setup(self):
+ self.__connect_signals(Segment)
+ self.__connect_signals(Annotation)
+
+
+
+ def teardown(self):
+ self.__disconnect_signals(Annotation)
+ self.__disconnect_signals(Segment)
+
\ No newline at end of file
--- a/src/ldt/ldt/ldt_utils/__init__.py Sun Feb 10 23:04:28 2013 +0100
+++ b/src/ldt/ldt/ldt_utils/__init__.py Wed Feb 27 00:07:54 2013 +0100
@@ -1,2 +0,0 @@
-#initialize
-from ldt.ldt_utils import contentindexer
--- a/src/ldt/ldt/ldt_utils/contentindexer.py Sun Feb 10 23:04:28 2013 +0100
+++ b/src/ldt/ldt/ldt_utils/contentindexer.py Wed Feb 27 00:07:54 2013 +0100
@@ -1,15 +1,16 @@
+from StringIO import StringIO
from django.dispatch import receiver
from ldt import settings
+from ldt.indexation import object_delete, object_insert
+from ldt.ldt_utils.events import post_project_save
from ldt.ldt_utils.models import Segment, Content, Project
-from ldt.ldt_utils.events import post_project_save
from ldt.ldt_utils.stat import update_stat_project, add_annotation_to_stat
from ldt.ldt_utils.utils import reduce_text_node
+from ldt.utils.url import request_with_auth
from tagging import settings as tagging_settings
import logging
import lxml.etree #@UnresolvedImport
import tagging.utils
-from ldt.utils.url import request_with_auth
-from StringIO import StringIO
logger = logging.getLogger(__name__)
@@ -19,8 +20,11 @@
class LdtIndexer(object):
- def __init__(self, decoupage_blackList=settings.DECOUPAGE_BLACKLIST):
- self.__decoupage_blacklist = decoupage_blackList
+ def __init__(self, object_list, decoupage_blackList=settings.DECOUPAGE_BLACKLIST, callback=None):
+ self.__object_list = object_list
+ self.__decoupage_blacklist = decoupage_blackList
+ self.__callback = callback
+ self.__segment_cache = []
@Property
def decoupage_blacklist(): #@NoSelf
@@ -40,7 +44,23 @@
return locals()
def index_all(self):
- raise NotImplemented
+ for i,obj in enumerate(self.__object_list):
+ if self.__callback:
+ self.__callback(i,obj)
+ self.index_object(obj)
+
+
+ def index_object(self, obj):
+
+ self._do_index_object(obj)
+
+ if self.__segment_cache:
+ object_insert(Segment, self.__segment_cache, 'id_hash')
+ self.__segment_cache = []
+
+
+ def _do_index_object(self, obj):
+ raise NotImplementedError()
def index_ensemble(self, ensemble, content, project=None):
ensembleId = ensemble.get(u"id", None)
@@ -97,7 +117,7 @@
audio_src = audio_node[0].get(u"source", u"")
audio_href = audio_node[0].text
- seg = Segment(content=content,
+ seg = Segment.create(content=content,
iri_id=content.iri_id,
ensemble_id=ensembleId,
cutting_id=decoupId,
@@ -114,55 +134,41 @@
audio_src=audio_src,
audio_href=audio_href)
seg.polemics = seg.get_polemic(polemics)
- seg.save()
+ if settings.LDT_INDEXATION_INSERT_BATCH_SIZE < 2:
+ seg.save()
+ else:
+ self.__segment_cache.append(seg)
+ if not (len(self.__segment_cache)%settings.LDT_INDEXATION_INSERT_BATCH_SIZE):
+ object_insert(Segment, self.__segment_cache)
+ self.__segment_cache = []
class ContentIndexer(LdtIndexer):
- def __init__(self, contentList, decoupage_blackList=settings.DECOUPAGE_BLACKLIST, callback=None):
- super(ContentIndexer, self).__init__(decoupage_blackList)
- self.__contentList = contentList
- self.__callback = callback
-
- def index_all(self):
- for i,content in enumerate(self.__contentList):
- if self.__callback:
- self.__callback(i,content)
- self.index_content(content)
-
- def index_content(self, content):
+ def _do_index_object(self, obj):
+
+ content = obj
url = content.iri_url()
_, file_content = request_with_auth(url)
doc = lxml.etree.parse(StringIO(file_content)) #@UndefinedVariable
- Segment.objects.filter(iri_id=content.iri_id).delete() #@UndefinedVariable
+ object_delete(Segment, iri_id=content.iri_id, project_id='')
res = doc.xpath("/iri/body/ensembles/ensemble")
for ensemble in res:
self.index_ensemble(ensemble, content)
-
+
class ProjectIndexer(LdtIndexer):
+
+ def _do_index_object(self, obj):
- def __init__(self, projectList, decoupage_blackList=settings.DECOUPAGE_BLACKLIST, callback=None):
- super(ProjectIndexer, self).__init__(decoupage_blackList)
- self.__projectList = projectList
- self.__callback = callback
-
- def index_all(self):
- for i,project in enumerate(self.__projectList):
- if self.__callback:
- self.__callback(i,project)
-
- self.index_project(project)
-
- def index_project(self, project):
-
+ project = obj
# pocketfilms.utils.log.debug("Indexing project : "+str(project.iri_id))
doc = lxml.etree.fromstring(project.ldt_encoded) #@UndefinedVariable
- Segment.objects.filter(project_obj__ldt_id=project.ldt_id).delete() #@UndefinedVariable
+ object_delete(Segment, project_obj__ldt_id=project.ldt_id)
res = doc.xpath("/iri/annotations/content")
@@ -183,7 +189,7 @@
if must_reindex and settings.AUTO_INDEX_AFTER_SAVE:
instance = kwargs['instance']
if instance.state != Project.PUBLISHED:
- Segment.objects.filter(project_obj__ldt_id=instance.ldt_id).delete() #@UndefinedVariable
+ object_delete(Segment, project_obj__ldt_id=instance.ldt_id)
update_stat_project(instance)
else:
projectIndexer = ProjectIndexer([instance])
@@ -210,7 +216,7 @@
audio_href = params.get("audio_href", "")
polemics = params.get("polemics", "")
- seg = Segment(content=content,
+ seg = Segment.create(content=content,
iri_id=content.iri_id if content is not None else "",
ensemble_id=ensemble_id,
cutting_id=cutting_id,
--- a/src/ldt/ldt/ldt_utils/models.py Sun Feb 10 23:04:28 2013 +0100
+++ b/src/ldt/ldt/ldt_utils/models.py Wed Feb 27 00:07:54 2013 +0100
@@ -44,8 +44,8 @@
class Media(SafeModel):
external_id = models.CharField(max_length=1024, null=True, blank=True, verbose_name=_('media.external_id'))
- external_permalink = models.URLField(max_length=1024, verify_exists=False, null=True, blank=True, verbose_name=_('media.external_permalink'))
- external_publication_url = models.URLField(max_length=1024, verify_exists=True, null=True, blank=True, verbose_name=_('media.external_publication_url'))
+ external_permalink = models.URLField(max_length=1024, null=True, blank=True, verbose_name=_('media.external_permalink'))
+ external_publication_url = models.URLField(max_length=1024, null=True, blank=True, verbose_name=_('media.external_publication_url'))
external_src_url = models.CharField(max_length=1024, null=True, blank=True, verbose_name=_('media.external_src_url'))
creation_date = models.DateTimeField(auto_now_add=True, verbose_name=_('media.creation_date'))
media_creation_date = models.DateTimeField(null=True, blank=True, verbose_name=_('media.media_creation_date'))
@@ -232,7 +232,7 @@
def get_duration(self):
if self.duration is None:
- doc = lxml.etree.parse(self.iri_file_path())
+ doc = lxml.etree.parse(self.iri_file_path()) #@UndefinedVariable
res = doc.xpath("/iri/body/medias/media[@id='video']/video")
if len(res) > 0:
try:
@@ -618,7 +618,7 @@
def get_description(self, doc=None):
if doc is None:
- doc = lxml.etree.fromstring(self.ldt)
+ doc = lxml.etree.fromstring(self.ldt) #@UndefinedVariable
res = doc.xpath("/iri/project")
if len(res) > 0:
@@ -738,7 +738,7 @@
def has_annotations(self):
nb_annot = 0
- doc = lxml.etree.fromstring(self.ldt)
+ doc = lxml.etree.fromstring(self.ldt) #@UndefinedVariable
res = doc.xpath("/iri/annotations/content/ensemble/decoupage")
for r in res:
nb_annot = nb_annot + r.find('elements').__len__()
@@ -788,6 +788,11 @@
audio_src = models.CharField(max_length=255, unique=False, null=True, blank=True)
audio_href = models.CharField(max_length=512, unique=False, null=True, blank=True)
+ @classmethod
+ def create(cls, **kwargs):
+ seg = cls(**kwargs)
+ seg.set_hash()
+ return seg
# All combinations of polemic hashtags can be represented by a combination of
# 4 bits, 1 if the hashtag is in the tweet, 0 else. We use the order OK, KO, Q, REF
@@ -799,7 +804,7 @@
'Q': set([2,3,6,7,10,11,14,15]),
'REF': set([1,3,5,7,9,11,13,15]),
}
-
+
def is_polemic(self, polemic_keyword): # OK, KO, Q, REF
if self.polemics in self.mask[polemic_keyword]:
return True
@@ -816,14 +821,25 @@
return value.pop()
+ def set_hash(self):
+ try:
+ self.id_hash = generate_hash(self.__unicode__())
+ except AttributeError:
+ self.id_hash = None
+
+ def __unicode__(self):
+ return "/".join((
+ unicode(self.project_id if self.project_id is not None else ""),
+ unicode(self.iri_id if self.iri_id is not None else ""),
+ unicode(self.ensemble_id if self.ensemble_id is not None else ""),
+ unicode(self.cutting_id if self.cutting_id is not None else ""),
+ unicode(self.element_id if self.element_id is not None else "")
+ ))
+
def save(self, *args, **kwargs):
- self.id_hash = generate_hash(self.__unicode__())
-
+ self.set_hash()
super(Segment, self).save(*args, **kwargs)
-
- def __unicode__(self):
- return "/".join((unicode(self.project_id), unicode(self.iri_id), unicode(self.ensemble_id), unicode(self.cutting_id), unicode(self.element_id)))
class Meta:
permissions = (
--- a/src/ldt/ldt/management/commands/reindex.py Sun Feb 10 23:04:28 2013 +0100
+++ b/src/ldt/ldt/management/commands/reindex.py Wed Feb 27 00:07:54 2013 +0100
@@ -29,16 +29,13 @@
self.stdout.write('Creating contents index...\n')
contentList = Content.objects.all()
count = contentList.count()
-
- c = lambda i,o: show_progress(i+1, count, o.title, 50)
-
- indexer = ContentIndexer(contentList, callback=c)
+
+ indexer = ContentIndexer(contentList, callback=(lambda i,o: show_progress(i+1, count, o.title, 50)))
indexer.index_all()
if projects:
self.stdout.write('Creating projects index...\n')
projectList = Project.objects.filter(contents__in=contentList, state=2).distinct()
count = projectList.count()
- c = lambda i,o: show_progress(i+1, count, o.title, 50)
- indexer = ProjectIndexer(projectList, callback=c)
+ indexer = ProjectIndexer(projectList, callback=(lambda i,o: show_progress(i+1, count, o.title, 50)))
indexer.index_all()
--- a/src/ldt/ldt/settings.py Sun Feb 10 23:04:28 2013 +0100
+++ b/src/ldt/ldt/settings.py Wed Feb 27 00:07:54 2013 +0100
@@ -10,7 +10,7 @@
#DEFAULT_FROM_EMAIL = "admin@domain.com"
#SERVER_EMAIL = "admin@domain.com"
-INSTALLED_APPS = (
+INSTALLED_APPS = getattr(settings, 'INSTALLED_APPS', (
'django_extensions',
'django.contrib.auth',
'django.contrib.contenttypes',
@@ -35,9 +35,9 @@
'guardian',
'sorl.thumbnail',
'tastypie',
-)
+))
-MIDDLEWARE_CLASSES = (
+MIDDLEWARE_CLASSES = getattr(settings, 'MIDDLEWARE_CLASSES', (
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
@@ -49,7 +49,7 @@
'django.contrib.messages.middleware.MessageMiddleware',
'django_openid_consumer.middleware.OpenIDMiddleware',
'ldt.ldt_utils.middleware.userprofile.LanguageMiddleware',
-)
+))
@@ -78,25 +78,26 @@
ACCOUNT_ACTIVATION_DAYS = getattr(settings, 'ACCOUNT_ACTIVATION_DAYS', 7)
LDT_MEDIA_PREFIX = getattr(settings, 'LDT_MEDIA_PREFIX', MEDIA_URL + 'ldt/')
-LDT_MAX_SEARCH_NUMBER = 50
-LDT_MAX_FRAGMENT_PER_SEARCH = 3
-LDT_RESULTS_PER_PAGE = 1
-LDT_JSON_DEFAULT_INDENT = 2
-LDT_MAX_CONTENTS_PER_PAGE = 10
-LDT_MAX_PROJECTS_PER_PAGE = 10
-LDT_FRONT_MEDIA_PER_PAGE = 9
+LDT_MAX_SEARCH_NUMBER = getattr(settings, 'LDT_MAX_SEARCH_NUMBER', 50)
+LDT_MAX_FRAGMENT_PER_SEARCH = getattr(settings, 'LDT_MAX_FRAGMENT_PER_SEARCH', 3)
+LDT_RESULTS_PER_PAGE = getattr(settings, 'LDT_RESULTS_PER_PAGE', 1)
+LDT_JSON_DEFAULT_INDENT = getattr(settings, 'LDT_JSON_DEFAULT_INDENT', 2)
+LDT_MAX_CONTENTS_PER_PAGE = getattr(settings, 'LDT_MAX_CONTENTS_PER_PAGE', 10)
+LDT_MAX_PROJECTS_PER_PAGE = getattr(settings, 'LDT_MAX_PROJECTS_PER_PAGE', 10)
+LDT_FRONT_MEDIA_PER_PAGE = getattr(settings, 'LDT_FRONT_MEDIA_PER_PAGE', 9)
AUTO_INDEX_AFTER_SAVE = getattr(settings, 'AUTO_INDEX_AFTER_SAVE', True)
+LDT_INDEXATION_INSERT_BATCH_SIZE = getattr(settings, 'LDT_INDEXATION_INSERT_BATCH_SIZE', 5000)
WEB_VERSION = getattr(settings, 'WEB_VERSION', '')
-ANONYOUS_USER_ID = -1
-USE_GROUP_PERMISSIONS = ['Project', 'Content']
-PUBLIC_GROUP_NAME = 'everyone'
+ANONYOUS_USER_ID = getattr(settings, 'ANONYMOUS_USER_ID',-1)
+USE_GROUP_PERMISSIONS = getattr(settings, 'USE_GROUP_PERMISSIONS', ['Project', 'Content'])
+PUBLIC_GROUP_NAME = getattr(settings, 'PUBLIC_GROUP_NAME','everyone')
-DEFAULT_CONTENT_ICON = "thumbnails/contents/content_default_icon.png"
-DEFAULT_PROJECT_ICON = "thumbnails/projects/project_default_icon.png"
-DEFAULT_USER_ICON = "thumbnails/users/user_default_icon.png"
-DEFAULT_GROUP_ICON = "thumbnails/groups/group_default_icon.png"
+DEFAULT_CONTENT_ICON = getattr(settings, 'DEFAULT_CONTENT_ICON', "thumbnails/contents/content_default_icon.png")
+DEFAULT_PROJECT_ICON = getattr(settings, 'DEFAULT_PROJECT_ICON', "thumbnails/projects/project_default_icon.png")
+DEFAULT_USER_ICON = getattr(settings, 'DEFAULT_USER_ICON', "thumbnails/users/user_default_icon.png")
+DEFAULT_GROUP_ICON = getattr(settings, 'DEFAULT_GROUP_ICON', "thumbnails/groups/group_default_icon.png")
# force settings value
if(not hasattr(settings, 'MAX_TAG_LENGTH') or getattr(settings, 'MAX_TAG_LENGTH') > 255):
@@ -104,9 +105,11 @@
EXTERNAL_STREAM_SRC = getattr(settings, 'EXTERNAL_STREAM_SRC', ['youtube.com', 'dailymotion.com', 'vimeo.com'])
-HAYSTACK_CONNECTIONS = {
+HAYSTACK_CONNECTIONS = getattr(settings, 'HAYSTACK_CONNECTIONS', {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
-}
+})
+HAYSTACK_SIGNAL_PROCESSOR = getattr(settings, 'HAYSTACK_SIGNAL_PROCESSOR', 'ldt.indexation.signals.LdtSignalProcessor')
+