Enhance the segment search API: require the 'q' parameter and honor an optional 'limit'. Add a 'no_content' option to the reindex command.
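The search endpoint now answers with a 400 (tastypie BadRequest) when 'q' is missing, and an optional 'limit' parameter overrides API_LIMIT_PER_PAGE as the page size. A minimal sketch of how this could be exercised from a Django test, assuming the endpoint is mounted under /api/ldt/segment/ (the URL prefix is an assumption, not part of this patch):

    from django.test.client import Client

    client = Client()
    # hypothetical URL prefix; adjust to wherever SegmentResource is registered
    resp = client.get('/api/ldt/segment/search/', {'q': 'foo', 'limit': 10, 'page': 2})
    assert resp.status_code == 200
    resp = client.get('/api/ldt/segment/search/')  # no 'q' parameter
    assert resp.status_code == 400                 # the BadRequest raised above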
--- a/src/ldt/ldt/api/ldt/resources/segment.py Fri Feb 15 11:17:54 2013 +0100
+++ b/src/ldt/ldt/api/ldt/resources/segment.py Thu Feb 21 13:10:57 2013 +0100
@@ -2,10 +2,11 @@
from django.conf.urls.defaults import url
from django.core.paginator import Paginator, InvalidPage
from django.db.models import F, Q
-from ldt.indexation import get_results_with_context
+from ldt.indexation import get_results_list
from ldt.ldt_utils.models import Content, Segment
from ldt.ldt_utils.segmentserializer import SegmentSerializer
from tastypie.constants import ALL
+from tastypie.exceptions import BadRequest
from tastypie.http import HttpNotFound
from tastypie.resources import ModelResource
from tastypie.utils import trailing_slash
@@ -40,8 +41,9 @@
self.method_check(request, allowed=['get'])
# Do the query.
search = request.GET.get('q', '')
+ if search == '':
+ raise BadRequest('The request needs a search query "q=" parameter.')
field = "all"
- content_list = None
if u'author:' in search.lower() :
sub = search[7:]
sub = sub.upper()
@@ -50,22 +52,25 @@
if sub[-1] != u'"':
sub = sub + u'"'
search = u'author:' + sub
- results = get_results_with_context(field, search, content_list)
- all_segments = Segment.objects.filter(element_id__in=[e['element_id'] for e in results])
- paginator = Paginator(all_segments, getattr(settings, 'API_LIMIT_PER_PAGE', 20))
-
+
+ results = get_results_list(field, search, False)
+ # get_results_list returns a SearchQuerySet; load_all() bulk-fetches the real Segment objects
+ all_segments = results.load_all()
+ paginator = Paginator(all_segments, int(request.GET.get("limit") or getattr(settings, 'API_LIMIT_PER_PAGE', 20)))
+
try:
page = paginator.page(int(request.GET.get('page', 1)))
except InvalidPage:
raise HttpNotFound("Sorry, no results on that page.")
-
+
objects = []
-
- for segment in page.object_list:
- bundle = self.build_bundle(obj=segment, request=request)
+
+ for search_res in page.object_list:
+ # search_res is a SearchResult; search_res.object is the real Segment object, thanks to results.load_all()
+ bundle = self.build_bundle(obj=search_res.object, request=request)
bundle = self.full_dehydrate(bundle)
objects.append(bundle)
-
+
object_list = {
'objects': objects,
}
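For context (not part of the patch): a minimal sketch of the Haystack load_all() idiom the hunk above relies on, assuming get_results_list() returns a plain haystack SearchQuerySet; the query string is a placeholder:

    from haystack.query import SearchQuerySet
    from ldt.ldt_utils.models import Segment

    results = SearchQuerySet().models(Segment).auto_query('foo')
    for search_res in results.load_all():    # bulk-loads the backing DB rows
        segment = search_res.object          # a real Segment instance, no per-row query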
--- a/src/ldt/ldt/management/commands/reindex.py Fri Feb 15 11:17:54 2013 +0100
+++ b/src/ldt/ldt/management/commands/reindex.py Thu Feb 21 13:10:57 2013 +0100
@@ -16,11 +16,17 @@
action="store",
type="string",
help="Index only the content specified by CONTENT_ID."),
+ make_option("-n", "--nocontent",
+ dest="no_content",
+ action="store_true",
+ default=False,
+ help="Skip content indexing (ContentIndexer and iri file loading); useful for tests."),
)
def handle(self, *args, **options):
content_id = options.get("content_id")
projects = options.get("projects")
+ no_content = options.get("no_content")
if content_id:
self.stdout.write('Creating index for %s\n' % content_id)
@@ -31,14 +37,16 @@
count = contentList.count()
c = lambda i,o: show_progress(i+1, count, o.title, 50)
-
- indexer = ContentIndexer(contentList, callback=c)
- indexer.index_all()
+
+ # if no_content is set, skip the ContentIndexer and the iri file loading (very useful for tests)
+ if not no_content:
+ indexer = ContentIndexer(contentList, callback=c)
+ indexer.index_all()
if projects:
self.stdout.write('Creating projects index...\n')
projectList = Project.objects.filter(contents__in=contentList, state=2).distinct()
count = projectList.count()
- c = lambda i,o: show_progress(i+1, count, o.title, 50)
+ c = lambda i,o: show_progress(i+1, count, o.title, 50)
indexer = ProjectIndexer(projectList, callback=c)
indexer.index_all()
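Usage note (hypothetical, for illustration): from a test, the new flag can be driven through Django's call_command, which maps keyword arguments onto the option dests:

    from django.core.management import call_command

    # rebuild the project index while skipping content indexing entirely
    # (assumes the existing 'projects' option is a boolean flag)
    call_command('reindex', projects=True, no_content=True)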