"""
The main QuerySet implementation. This provides the public API for the ORM.
"""

try:
    set
except NameError:
    from sets import Set as set     # Python 2.3 fallback

from copy import deepcopy

from django.db import connection, transaction, IntegrityError
from django.db.models.aggregates import Aggregate
from django.db.models.fields import DateField
from django.db.models.query_utils import Q, select_related_descend, CollectedObjects, CyclicDependency, deferred_class_factory
from django.db.models import signals, sql


# Used to control how many objects are worked with at once in some cases (e.g.
# when deleting objects).
CHUNK_SIZE = 100
ITER_CHUNK_SIZE = CHUNK_SIZE

# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20

# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
|
class QuerySet(object):
    """
    Represents a lazy database lookup for a set of objects.
    """
    def __init__(self, model=None, query=None):
        self.model = model
        self.query = query or sql.Query(self.model, connection)
        self._result_cache = None
        self._iter = None
        self._sticky_filter = False

    ########################
    # PYTHON MAGIC METHODS #
    ########################

    def __deepcopy__(self, memo):
        """
        Deep copy of a QuerySet doesn't populate the cache.
        """
        obj_dict = deepcopy(self.__dict__, memo)
        obj_dict['_iter'] = None

        obj = self.__class__()
        obj.__dict__.update(obj_dict)
        return obj
|
    def __getstate__(self):
        """
        Allows the QuerySet to be pickled.
        """
        # Force the cache to be fully populated.
        len(self)

        obj_dict = self.__dict__.copy()
        obj_dict['_iter'] = None
        return obj_dict

    def __repr__(self):
        data = list(self[:REPR_OUTPUT_SIZE + 1])
        if len(data) > REPR_OUTPUT_SIZE:
            data[-1] = "...(remaining elements truncated)..."
        return repr(data)
|
    def __len__(self):
        # Since __len__ is called quite frequently (for example, as part of
        # list(qs)), we make some effort here to be as efficient as possible
        # whilst not messing up any existing iterators against the QuerySet.
        if self._result_cache is None:
            if self._iter:
                self._result_cache = list(self._iter)
            else:
                self._result_cache = list(self.iterator())
        elif self._iter:
            self._result_cache.extend(list(self._iter))
        return len(self._result_cache)
|
    def __iter__(self):
        if self._result_cache is None:
            self._iter = self.iterator()
            self._result_cache = []
        if self._iter:
            return self._result_iter()
        # Python's list iterator is better than our version when we're just
        # iterating over the cache.
        return iter(self._result_cache)

    def _result_iter(self):
        pos = 0
        while 1:
            upper = len(self._result_cache)
            while pos < upper:
                yield self._result_cache[pos]
                pos = pos + 1
            if not self._iter:
                raise StopIteration
            if len(self._result_cache) <= pos:
                self._fill_cache()

    def __nonzero__(self):
        if self._result_cache is not None:
            return bool(self._result_cache)
        try:
            iter(self).next()
        except StopIteration:
            return False
        return True
|
    def __getitem__(self, k):
        """
        Retrieves an item or slice from the set of results.
        """
        if not isinstance(k, (slice, int, long)):
            raise TypeError
        assert ((not isinstance(k, slice) and (k >= 0))
                or (isinstance(k, slice) and (k.start is None or k.start >= 0)
                    and (k.stop is None or k.stop >= 0))), \
                "Negative indexing is not supported."

        if self._result_cache is not None:
            if self._iter is not None:
                # The result cache has only been partially populated, so we may
                # need to fill it out a bit more.
                if isinstance(k, slice):
                    if k.stop is not None:
                        # Some people insist on passing in strings here.
                        bound = int(k.stop)
                    else:
                        bound = None
                else:
                    bound = k + 1
                if len(self._result_cache) < bound:
                    self._fill_cache(bound - len(self._result_cache))
            return self._result_cache[k]

        if isinstance(k, slice):
            qs = self._clone()
            if k.start is not None:
                start = int(k.start)
            else:
                start = None
            if k.stop is not None:
                stop = int(k.stop)
            else:
                stop = None
            qs.query.set_limits(start, stop)
            return k.step and list(qs)[::k.step] or qs
        try:
            qs = self._clone()
            qs.query.set_limits(k, k + 1)
            return list(qs)[0]
        except self.model.DoesNotExist, e:
            raise IndexError, e.args
|
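    # Illustrative summary of the slicing behaviour above (``Entry`` is a
    # hypothetical model, not defined in this module):
    #
    #     Entry.objects.all()[:5]      # lazy; adds "LIMIT 5" to the query
    #     Entry.objects.all()[5:10]    # lazy; adds "OFFSET 5 LIMIT 5"
    #     Entry.objects.all()[::2]     # a step forces evaluation to a list
    #     Entry.objects.all()[0]       # issues a query with "LIMIT 1"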
|
    def __and__(self, other):
        self._merge_sanity_check(other)
        if isinstance(other, EmptyQuerySet):
            return other._clone()
        combined = self._clone()
        combined.query.combine(other.query, sql.AND)
        return combined

    def __or__(self, other):
        self._merge_sanity_check(other)
        combined = self._clone()
        if isinstance(other, EmptyQuerySet):
            return combined
        combined.query.combine(other.query, sql.OR)
        return combined

    ####################################
    # METHODS THAT DO DATABASE QUERIES #
    ####################################
|
    def iterator(self):
        """
        An iterator over the results from applying this QuerySet to the
        database.
        """
        fill_cache = self.query.select_related
        if isinstance(fill_cache, dict):
            requested = fill_cache
        else:
            requested = None
        max_depth = self.query.max_depth

        extra_select = self.query.extra_select.keys()
        aggregate_select = self.query.aggregate_select.keys()

        only_load = self.query.get_loaded_field_names()
        if not fill_cache:
            fields = self.model._meta.fields
            pk_idx = self.model._meta.pk_index()

        index_start = len(extra_select)
        aggregate_start = index_start + len(self.model._meta.fields)

        load_fields = []
        # If only/defer clauses have been specified,
        # build the list of fields that are to be loaded.
        if only_load:
            for field, model in self.model._meta.get_fields_with_model():
                if model is None:
                    model = self.model
                if field == self.model._meta.pk:
                    # Record the index of the primary key when it is found
                    pk_idx = len(load_fields)
                try:
                    if field.name in only_load[model]:
                        # Add a field that has been explicitly included
                        load_fields.append(field.name)
                except KeyError:
                    # Model wasn't explicitly listed in the only_load table
                    # Therefore, we need to load all fields from this model
                    load_fields.append(field.name)

        skip = None
        if load_fields and not fill_cache:
            # Some fields have been deferred, so we have to initialise
            # via keyword arguments.
            skip = set()
            init_list = []
            for field in fields:
                if field.name not in load_fields:
                    skip.add(field.attname)
                else:
                    init_list.append(field.attname)
            model_cls = deferred_class_factory(self.model, skip)

        for row in self.query.results_iter():
            if fill_cache:
                obj, _ = get_cached_row(self.model, row,
                            index_start, max_depth,
                            requested=requested, offset=len(aggregate_select),
                            only_load=only_load)
            else:
                if skip:
                    row_data = row[index_start:aggregate_start]
                    pk_val = row_data[pk_idx]
                    obj = model_cls(**dict(zip(init_list, row_data)))
                else:
                    # Omit aggregates in object creation.
                    obj = self.model(*row[index_start:aggregate_start])

            for i, k in enumerate(extra_select):
                setattr(obj, k, row[i])

            # Add the aggregates to the model
            for i, aggregate in enumerate(aggregate_select):
                setattr(obj, aggregate, row[i + aggregate_start])

            yield obj
|
    def aggregate(self, *args, **kwargs):
        """
        Returns a dictionary containing the calculations (aggregation)
        over the current queryset.

        If args are present, each expression is passed as a keyword argument
        using the Aggregate object's default alias.
        """
        for arg in args:
            kwargs[arg.default_alias] = arg

        query = self.query.clone()

        for (alias, aggregate_expr) in kwargs.items():
            query.add_aggregate(aggregate_expr, self.model, alias,
                is_summary=True)

        return query.get_aggregation()
|
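    # Illustrative use of aggregate() (``Book`` and its ``price`` field are
    # hypothetical; Avg and Max live in django.db.models):
    #
    #     >>> Book.objects.aggregate(Avg('price'))           # default alias
    #     {'price__avg': ...}
    #     >>> Book.objects.aggregate(highest=Max('price'))   # explicit alias
    #     {'highest': ...}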
|
    def count(self):
        """
        Performs a SELECT COUNT() and returns the number of records as an
        integer.

        If the QuerySet is already fully cached, this simply returns the
        length of the cached result set to avoid multiple SELECT COUNT(*)
        calls.
        """
        if self._result_cache is not None and not self._iter:
            return len(self._result_cache)

        return self.query.get_count()
|
    def get(self, *args, **kwargs):
        """
        Performs the query and returns a single object matching the given
        keyword arguments.
        """
        clone = self.filter(*args, **kwargs)
        num = len(clone)
        if num == 1:
            return clone._result_cache[0]
        if not num:
            raise self.model.DoesNotExist("%s matching query does not exist."
                    % self.model._meta.object_name)
        raise self.model.MultipleObjectsReturned("get() returned more than one %s -- it returned %s! Lookup parameters were %s"
                % (self.model._meta.object_name, num, kwargs))

    def create(self, **kwargs):
        """
        Creates a new object with the given kwargs, saving it to the database
        and returning the created object.
        """
        obj = self.model(**kwargs)
        obj.save(force_insert=True)
        return obj
|
    def get_or_create(self, **kwargs):
        """
        Looks up an object with the given kwargs, creating one if necessary.
        Returns a tuple of (object, created), where created is a boolean
        specifying whether an object was created.
        """
        assert kwargs, \
                'get_or_create() must be passed at least one keyword argument'
        defaults = kwargs.pop('defaults', {})
        try:
            return self.get(**kwargs), False
        except self.model.DoesNotExist:
            try:
                params = dict([(k, v) for k, v in kwargs.items() if '__' not in k])
                params.update(defaults)
                obj = self.model(**params)
                sid = transaction.savepoint()
                obj.save(force_insert=True)
                transaction.savepoint_commit(sid)
                return obj, True
            except IntegrityError, e:
                transaction.savepoint_rollback(sid)
                try:
                    return self.get(**kwargs), False
                except self.model.DoesNotExist:
                    raise e
|
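    # Illustrative use of get_or_create() (``Person`` is a hypothetical
    # model). Lookup kwargs containing '__' are used only for the get();
    # 'defaults' is merged in only when a new object must be created:
    #
    #     obj, created = Person.objects.get_or_create(
    #         first_name='John', last_name='Lennon',
    #         defaults={'birthday': date(1940, 10, 9)})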
|
    def latest(self, field_name=None):
        """
        Returns the latest object, according to the model's 'get_latest_by'
        option or the given field_name.
        """
        latest_by = field_name or self.model._meta.get_latest_by
        assert bool(latest_by), "latest() requires either a field_name parameter or 'get_latest_by' in the model"
        assert self.query.can_filter(), \
                "Cannot change a query once a slice has been taken."
        obj = self._clone()
        obj.query.set_limits(high=1)
        obj.query.add_ordering('-%s' % latest_by)
        return obj.get()
|
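    # Illustrative: ``Entry.objects.latest('pub_date')`` orders by
    # '-pub_date', limits the query to one row and returns that single
    # object, raising DoesNotExist if the QuerySet is empty (``Entry`` is
    # hypothetical).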
|
    def in_bulk(self, id_list):
        """
        Returns a dictionary mapping each of the given IDs to the object with
        that ID.
        """
        assert self.query.can_filter(), \
                "Cannot use 'limit' or 'offset' with in_bulk"
        assert isinstance(id_list, (tuple, list)), \
                "in_bulk() must be provided with a list of IDs."
        if not id_list:
            return {}
        qs = self._clone()
        qs.query.add_filter(('pk__in', id_list))
        return dict([(obj._get_pk_val(), obj) for obj in qs.iterator()])
|
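    # Illustrative: ``Blog.objects.in_bulk([1, 4, 7])`` returns a dict such
    # as ``{1: <Blog ...>, 4: <Blog ...>}``; IDs without a matching row are
    # simply absent from the result (``Blog`` is hypothetical).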
|
    def delete(self):
        """
        Deletes the records in the current QuerySet.
        """
        assert self.query.can_filter(), \
                "Cannot use 'limit' or 'offset' with delete."

        del_query = self._clone()

        # Disable non-supported fields.
        del_query.query.select_related = False
        del_query.query.clear_ordering()

        # Delete objects in chunks to prevent the list of related objects from
        # becoming too long.
        seen_objs = None
        while 1:
            # Collect all the objects to be deleted in this chunk, and all the
            # objects that are related to the objects that are to be deleted.
            seen_objs = CollectedObjects(seen_objs)
            for object in del_query[:CHUNK_SIZE]:
                object._collect_sub_objects(seen_objs)

            if not seen_objs:
                break
            delete_objects(seen_objs)

        # Clear the result cache, in case this QuerySet gets reused.
        self._result_cache = None
    delete.alters_data = True
|
    def update(self, **kwargs):
        """
        Updates all elements in the current QuerySet, setting all the given
        fields to the appropriate values.
        """
        assert self.query.can_filter(), \
                "Cannot update a query once a slice has been taken."
        query = self.query.clone(sql.UpdateQuery)
        query.add_update_values(kwargs)
        if not transaction.is_managed():
            transaction.enter_transaction_management()
            forced_managed = True
        else:
            forced_managed = False
        try:
            rows = query.execute_sql(None)
            if forced_managed:
                transaction.commit()
            else:
                transaction.commit_unless_managed()
        finally:
            if forced_managed:
                transaction.leave_transaction_management()
        self._result_cache = None
        return rows
    update.alters_data = True

    def _update(self, values):
        """
        A version of update that accepts field objects instead of field names.
        Used primarily for model saving and not intended for use by general
        code (it requires too much poking around at model internals to be
        useful at that level).
        """
        assert self.query.can_filter(), \
                "Cannot update a query once a slice has been taken."
        query = self.query.clone(sql.UpdateQuery)
        query.add_update_fields(values)
        self._result_cache = None
        return query.execute_sql(None)
    _update.alters_data = True
|
    ##################################################
    # PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
    ##################################################

    def values(self, *fields):
        return self._clone(klass=ValuesQuerySet, setup=True, _fields=fields)

    def values_list(self, *fields, **kwargs):
        flat = kwargs.pop('flat', False)
        if kwargs:
            raise TypeError('Unexpected keyword arguments to values_list: %s'
                    % (kwargs.keys(),))
        if flat and len(fields) > 1:
            raise TypeError("'flat' is not valid when values_list is called with more than one field.")
        return self._clone(klass=ValuesListQuerySet, setup=True, flat=flat,
                _fields=fields)
|
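    # Illustrative (``Entry`` with ``id`` and ``headline`` is hypothetical):
    #
    #     Entry.objects.values('id', 'headline')
    #     # -> dictionaries: [{'id': 1, 'headline': u'...'}, ...]
    #     Entry.objects.values_list('id', 'headline')
    #     # -> tuples: [(1, u'...'), ...]
    #     Entry.objects.values_list('id', flat=True)
    #     # -> bare values: [1, 2, 3, ...]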
|
    def dates(self, field_name, kind, order='ASC'):
        """
        Returns a list of datetime objects representing all available dates for
        the given field_name, scoped to 'kind'.
        """
        assert kind in ("month", "year", "day"), \
                "'kind' must be one of 'year', 'month' or 'day'."
        assert order in ('ASC', 'DESC'), \
                "'order' must be either 'ASC' or 'DESC'."
        return self._clone(klass=DateQuerySet, setup=True,
                _field_name=field_name, _kind=kind, _order=order)
|
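    # Illustrative: ``Entry.objects.dates('pub_date', 'year', order='DESC')``
    # evaluates to datetime objects, one per distinct year that has entries
    # (``Entry`` and ``pub_date`` are hypothetical).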
|
    def none(self):
        """
        Returns an empty QuerySet.
        """
        return self._clone(klass=EmptyQuerySet)

    ##################################################################
    # PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
    ##################################################################

    def all(self):
        """
        Returns a new QuerySet that is a copy of the current one. This allows a
        QuerySet to proxy for a model manager in some cases.
        """
        return self._clone()

    def filter(self, *args, **kwargs):
        """
        Returns a new QuerySet instance with the args ANDed to the existing
        set.
        """
        return self._filter_or_exclude(False, *args, **kwargs)
|
    def exclude(self, *args, **kwargs):
        """
        Returns a new QuerySet instance with NOT (args) ANDed to the existing
        set.
        """
        return self._filter_or_exclude(True, *args, **kwargs)

    def _filter_or_exclude(self, negate, *args, **kwargs):
        if args or kwargs:
            assert self.query.can_filter(), \
                    "Cannot filter a query once a slice has been taken."

        clone = self._clone()
        if negate:
            clone.query.add_q(~Q(*args, **kwargs))
        else:
            clone.query.add_q(Q(*args, **kwargs))
        return clone

    def complex_filter(self, filter_obj):
        """
        Returns a new QuerySet instance with filter_obj added to the filters.

        filter_obj can be a Q object (or anything with an add_to_query()
        method) or a dictionary of keyword lookup arguments.

        This exists to support framework features such as 'limit_choices_to',
        and usually it will be more natural to use other methods.
        """
        if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
            clone = self._clone()
            clone.query.add_q(filter_obj)
            return clone
        else:
            return self._filter_or_exclude(None, **filter_obj)
|
    def select_related(self, *fields, **kwargs):
        """
        Returns a new QuerySet instance that will select related objects.

        If fields are specified, they must be ForeignKey fields and only those
        related objects are included in the selection.
        """
        depth = kwargs.pop('depth', 0)
        if kwargs:
            raise TypeError('Unexpected keyword arguments to select_related: %s'
                    % (kwargs.keys(),))
        obj = self._clone()
        if fields:
            if depth:
                raise TypeError('Cannot pass both "depth" and fields to select_related()')
            obj.query.add_select_related(fields)
        else:
            obj.query.select_related = True
        if depth:
            obj.query.max_depth = depth
        return obj
|
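    # Illustrative (hypothetical ``Entry`` model with a ``blog`` ForeignKey):
    #
    #     Entry.objects.select_related('blog')    # join and cache entry.blog
    #     Entry.objects.select_related(depth=1)   # follow every FK one level
    #
    # Later attribute access on the cached relations then avoids extra
    # queries.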
|
    def dup_select_related(self, other):
        """
        Copies the related selection status from the QuerySet 'other' to the
        current QuerySet.
        """
        self.query.select_related = other.query.select_related

    def annotate(self, *args, **kwargs):
        """
        Return a query set in which the returned objects have been annotated
        with data aggregated from related fields.
        """
        for arg in args:
            kwargs[arg.default_alias] = arg

        obj = self._clone()

        obj._setup_aggregate_query(kwargs.keys())

        # Add the aggregates to the query
        for (alias, aggregate_expr) in kwargs.items():
            obj.query.add_aggregate(aggregate_expr, self.model, alias,
                is_summary=False)

        return obj
|
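    # Illustrative use of annotate() (hypothetical ``Blog`` model with a
    # reverse ``entry`` relation; Count lives in django.db.models):
    #
    #     >>> blogs = Blog.objects.annotate(num_entries=Count('entry'))
    #     >>> blogs[0].num_entries
    #
    # Unlike aggregate(), this returns a QuerySet and each object gains an
    # attribute named after the aggregate's alias.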
|
    def order_by(self, *field_names):
        """
        Returns a new QuerySet instance with the ordering changed.
        """
        assert self.query.can_filter(), \
                "Cannot reorder a query once a slice has been taken."
        obj = self._clone()
        obj.query.clear_ordering()
        obj.query.add_ordering(*field_names)
        return obj

    def distinct(self, true_or_false=True):
        """
        Returns a new QuerySet instance that will select only distinct results.
        """
        obj = self._clone()
        obj.query.distinct = true_or_false
        return obj
|
    def extra(self, select=None, where=None, params=None, tables=None,
            order_by=None, select_params=None):
        """
        Adds extra SQL fragments to the query.
        """
        assert self.query.can_filter(), \
                "Cannot change a query once a slice has been taken"
        clone = self._clone()
        clone.query.add_extra(select, select_params, where, params, tables, order_by)
        return clone
|
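    # Illustrative: attaching a computed column with raw SQL (hypothetical
    # ``Entry`` model; the SQL fragment is database-specific):
    #
    #     Entry.objects.extra(
    #         select={'is_recent': "pub_date > '2008-01-01'"})
    #
    # Each returned object then carries an extra ``is_recent`` attribute.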
|
    def reverse(self):
        """
        Reverses the ordering of the QuerySet.
        """
        clone = self._clone()
        clone.query.standard_ordering = not clone.query.standard_ordering
        return clone
|
    def defer(self, *fields):
        """
        Defers the loading of data for certain fields until they are accessed.
        The set of fields to defer is added to any existing set of deferred
        fields. The only exception to this is if None is passed in as the only
        parameter, in which case all deferrals are removed (None acts as a
        reset option).
        """
        clone = self._clone()
        if fields == (None,):
            clone.query.clear_deferred_loading()
        else:
            clone.query.add_deferred_loading(fields)
        return clone

    def only(self, *fields):
        """
        Essentially, the opposite of defer. Only the fields passed into this
        method and that are not already specified as deferred are loaded
        immediately when the queryset is evaluated.
        """
        if fields == (None,):
            # Can only pass None to defer(), not only(), as the reset option.
            # That won't stop people trying to do this, so let's be explicit.
            raise TypeError("Cannot pass None as an argument to only().")
        clone = self._clone()
        clone.query.add_immediate_loading(fields)
        return clone
|
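    # Illustrative (hypothetical ``Entry`` with ``headline`` and ``body``):
    #
    #     Entry.objects.defer('body')       # ``body`` loads only on access
    #     Entry.objects.only('headline')    # load just headline (plus pk)
    #     Entry.objects.defer(None)         # clear all deferrals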
|
    ###################################
    # PUBLIC INTROSPECTION ATTRIBUTES #
    ###################################

    def ordered(self):
        """
        Returns True if the QuerySet is ordered -- i.e. has an order_by()
        clause or a default ordering on the model.
        """
        if self.query.extra_order_by or self.query.order_by:
            return True
        elif self.query.default_ordering and self.query.model._meta.ordering:
            return True
        else:
            return False
    ordered = property(ordered)
|
    ###################
    # PRIVATE METHODS #
    ###################

    def _clone(self, klass=None, setup=False, **kwargs):
        if klass is None:
            klass = self.__class__
        query = self.query.clone()
        if self._sticky_filter:
            query.filter_is_sticky = True
        c = klass(model=self.model, query=query)
        c.__dict__.update(kwargs)
        if setup and hasattr(c, '_setup_query'):
            c._setup_query()
        return c

    def _fill_cache(self, num=None):
        """
        Fills the result cache with 'num' more entries (or until the results
        iterator is exhausted).
        """
        if self._iter:
            try:
                for i in range(num or ITER_CHUNK_SIZE):
                    self._result_cache.append(self._iter.next())
            except StopIteration:
                self._iter = None

    def _next_is_sticky(self):
        """
        Indicates that the next filter call and the one following that should
        be treated as a single filter. This is only important when it comes to
        determining when to reuse tables for many-to-many filters. Required so
        that we can filter naturally on the results of related managers.

        This doesn't return a clone of the current QuerySet (it returns
        "self"). The method is only used internally and should be immediately
        followed by a filter() that does create a clone.
        """
        self._sticky_filter = True
        return self

    def _merge_sanity_check(self, other):
        """
        Checks that we are merging two comparable QuerySet classes. By default
        this does nothing, but see the ValuesQuerySet for an example of where
        it's useful.
        """
        pass

    def _setup_aggregate_query(self, aggregates):
        """
        Prepare the query for computing a result that contains aggregate
        annotations.
        """
        opts = self.model._meta
        if self.query.group_by is None:
            field_names = [f.attname for f in opts.fields]
            self.query.add_fields(field_names, False)
            self.query.set_group_by()

    def _as_sql(self):
        """
        Returns the internal query's SQL and parameters (as a tuple).
        """
        obj = self.values("pk")
        return obj.query.as_nested_sql()

    # When used as part of a nested query, a queryset will never be an "always
    # empty" result.
    value_annotation = True
|
class ValuesQuerySet(QuerySet):
    def __init__(self, *args, **kwargs):
        super(ValuesQuerySet, self).__init__(*args, **kwargs)
        # select_related isn't supported in values(). (FIXME - #3358)
        self.query.select_related = False

        # QuerySet._clone() will also set up the _fields attribute with the
        # names of the model fields to select.

    def iterator(self):
        # Purge any extra columns that haven't been explicitly asked for.
        extra_names = self.query.extra_select.keys()
        field_names = self.field_names
        aggregate_names = self.query.aggregate_select.keys()

        names = extra_names + field_names + aggregate_names

        for row in self.query.results_iter():
            yield dict(zip(names, row))
|
    def _setup_query(self):
        """
        Constructs the field_names list that the values query will be
        retrieving.

        Called by the _clone() method after initializing the rest of the
        instance.
        """
        self.query.clear_deferred_loading()
        self.query.clear_select_fields()

        if self._fields:
            self.extra_names = []
            self.aggregate_names = []
            if not self.query.extra and not self.query.aggregates:
                # Short cut - if there are no extra or aggregates, then
                # the values() clause must be just field names.
                self.field_names = list(self._fields)
            else:
                self.query.default_cols = False
                self.field_names = []
                for f in self._fields:
                    # We inspect the full extra_select list since we might
                    # be adding back an extra select item that we hadn't
                    # had selected previously.
                    if f in self.query.extra:
                        self.extra_names.append(f)
                    elif f in self.query.aggregate_select:
                        self.aggregate_names.append(f)
                    else:
                        self.field_names.append(f)
        else:
            # Default to all fields.
            self.extra_names = None
            self.field_names = [f.attname for f in self.model._meta.fields]
            self.aggregate_names = None

        self.query.select = []
        if self.extra_names is not None:
            self.query.set_extra_mask(self.extra_names)
        self.query.add_fields(self.field_names, False)
        if self.aggregate_names is not None:
            self.query.set_aggregate_mask(self.aggregate_names)
|
    def _clone(self, klass=None, setup=False, **kwargs):
        """
        Cloning a ValuesQuerySet preserves the current fields.
        """
        c = super(ValuesQuerySet, self)._clone(klass, **kwargs)
        if not hasattr(c, '_fields'):
            # Only clone self._fields if _fields wasn't passed into the cloning
            # call directly.
            c._fields = self._fields[:]
        c.field_names = self.field_names
        c.extra_names = self.extra_names
        c.aggregate_names = self.aggregate_names
        if setup and hasattr(c, '_setup_query'):
            c._setup_query()
        return c

    def _merge_sanity_check(self, other):
        super(ValuesQuerySet, self)._merge_sanity_check(other)
        if (set(self.extra_names) != set(other.extra_names) or
                set(self.field_names) != set(other.field_names) or
                self.aggregate_names != other.aggregate_names):
            raise TypeError("Merging '%s' classes must involve the same values in each case."
                    % self.__class__.__name__)
|
    def _setup_aggregate_query(self, aggregates):
        """
        Prepare the query for computing a result that contains aggregate
        annotations.
        """
        self.query.set_group_by()

        if self.aggregate_names is not None:
            self.aggregate_names.extend(aggregates)
            self.query.set_aggregate_mask(self.aggregate_names)

        super(ValuesQuerySet, self)._setup_aggregate_query(aggregates)

    def _as_sql(self):
        """
        A ValuesQuerySet (or a subclass such as ValuesListQuerySet) can only
        be used as a nested query if it is already set up to select only a
        single field (in which case, that is the field column that is
        returned). This differs from QuerySet._as_sql(), where the column to
        select is set up by Django.
        """
        if ((self._fields and len(self._fields) > 1) or
                (not self._fields and len(self.model._meta.fields) > 1)):
            raise TypeError('Cannot use a multi-field %s as a filter value.'
                    % self.__class__.__name__)
        return self._clone().query.as_nested_sql()
|
class ValuesListQuerySet(ValuesQuerySet):
    def iterator(self):
        if self.flat and len(self._fields) == 1:
            for row in self.query.results_iter():
                yield row[0]
        elif not self.query.extra_select and not self.query.aggregate_select:
            for row in self.query.results_iter():
                yield tuple(row)
        else:
            # When extra(select=...) or an annotation is involved, the extra
            # cols are always at the start of the row, and we need to reorder
            # the fields to match the order in self._fields.
            extra_names = self.query.extra_select.keys()
            field_names = self.field_names
            aggregate_names = self.query.aggregate_select.keys()

            names = extra_names + field_names + aggregate_names

            # If a field list has been specified, use it. Otherwise, use the
            # full list of fields, including extras and aggregates.
            if self._fields:
                fields = self._fields
            else:
                fields = names

            for row in self.query.results_iter():
                data = dict(zip(names, row))
                yield tuple([data[f] for f in fields])

    def _clone(self, *args, **kwargs):
        clone = super(ValuesListQuerySet, self)._clone(*args, **kwargs)
        clone.flat = self.flat
        return clone
|
class DateQuerySet(QuerySet):
    def iterator(self):
        return self.query.results_iter()

    def _setup_query(self):
        """
        Sets up any special features of the query attribute.

        Called by the _clone() method after initializing the rest of the
        instance.
        """
        self.query.clear_deferred_loading()
        self.query = self.query.clone(klass=sql.DateQuery, setup=True)
        self.query.select = []
        field = self.model._meta.get_field(self._field_name, many_to_many=False)
        assert isinstance(field, DateField), "%r isn't a DateField." \
                % field.name
        self.query.add_date_select(field, self._kind, self._order)
        if field.null:
            self.query.add_filter(('%s__isnull' % field.name, False))

    def _clone(self, klass=None, setup=False, **kwargs):
        c = super(DateQuerySet, self)._clone(klass, False, **kwargs)
        c._field_name = self._field_name
        c._kind = self._kind
        # Copy _order too, so a later _setup_query() on the clone can reapply
        # the ordering direction instead of failing with an AttributeError.
        c._order = self._order
        if setup and hasattr(c, '_setup_query'):
            c._setup_query()
        return c
|
class EmptyQuerySet(QuerySet):
    def __init__(self, model=None, query=None):
        super(EmptyQuerySet, self).__init__(model, query)
        self._result_cache = []

    def __and__(self, other):
        return self._clone()

    def __or__(self, other):
        return other._clone()

    def count(self):
        return 0

    def delete(self):
        pass

    def _clone(self, klass=None, setup=False, **kwargs):
        c = super(EmptyQuerySet, self)._clone(klass, **kwargs)
        c._result_cache = []
        return c

    def iterator(self):
        # This slightly odd construction is because we need an empty generator
        # (it raises StopIteration immediately).
        yield iter([]).next()

    # EmptyQuerySet is always an empty result in where-clauses (and similar
    # situations).
    value_annotation = False
|
def get_cached_row(klass, row, index_start, max_depth=0, cur_depth=0,
        requested=None, offset=0, only_load=None):
    """
    Helper function that recursively returns an object with the specified
    related attributes already populated.
    """
    if max_depth and requested is None and cur_depth > max_depth:
        # We've recursed deeply enough; stop now.
        return None

    restricted = requested is not None
    load_fields = only_load and only_load.get(klass) or None
    if load_fields:
        # Handle deferred fields.
        skip = set()
        init_list = []
        pk_val = row[index_start + klass._meta.pk_index()]
        for field in klass._meta.fields:
            if field.name not in load_fields:
                skip.add(field.name)
            else:
                init_list.append(field.attname)
        field_count = len(init_list)
        fields = row[index_start : index_start + field_count]
        if fields == (None,) * field_count:
            obj = None
        elif skip:
            klass = deferred_class_factory(klass, skip)
            obj = klass(**dict(zip(init_list, fields)))
        else:
            obj = klass(*fields)
    else:
        field_count = len(klass._meta.fields)
        fields = row[index_start : index_start + field_count]
        if fields == (None,) * field_count:
            obj = None
        else:
            obj = klass(*fields)

    index_end = index_start + field_count + offset
    for f in klass._meta.fields:
        if not select_related_descend(f, restricted, requested):
            continue
        if restricted:
            next = requested[f.name]
        else:
            next = None
        cached_row = get_cached_row(f.rel.to, row, index_end, max_depth,
                cur_depth + 1, next)
        if cached_row:
            rel_obj, index_end = cached_row
            if obj is not None:
                setattr(obj, f.get_cache_name(), rel_obj)
    return obj, index_end
|
def delete_objects(seen_objs):
    """
    Iterate through a list of seen classes, and remove any instances that are
    referred to.
    """
    if not transaction.is_managed():
        transaction.enter_transaction_management()
        forced_managed = True
    else:
        forced_managed = False
    try:
        ordered_classes = seen_objs.keys()
    except CyclicDependency:
        # If there is a cyclic dependency, we cannot in general delete the
        # objects. However, if an appropriate transaction is set up, or if the
        # database is lax enough, it will succeed. So for now, we go ahead and
        # try anyway.
        ordered_classes = seen_objs.unordered_keys()

    obj_pairs = {}
    try:
        for cls in ordered_classes:
            items = seen_objs[cls].items()
            items.sort()
            obj_pairs[cls] = items

            # Pre-notify all instances to be deleted.
            for pk_val, instance in items:
                signals.pre_delete.send(sender=cls, instance=instance)

            pk_list = [pk for pk, instance in items]
            del_query = sql.DeleteQuery(cls, connection)
            del_query.delete_batch_related(pk_list)

            update_query = sql.UpdateQuery(cls, connection)
            for field, model in cls._meta.get_fields_with_model():
                if (field.rel and field.null and field.rel.to in seen_objs and
                        filter(lambda f: f.column == field.rel.get_related_field().column,
                               field.rel.to._meta.fields)):
                    if model:
                        sql.UpdateQuery(model, connection).clear_related(field,
                                pk_list)
                    else:
                        update_query.clear_related(field, pk_list)

        # Now delete the actual data.
        for cls in ordered_classes:
            items = obj_pairs[cls]
            items.reverse()

            pk_list = [pk for pk, instance in items]
            del_query = sql.DeleteQuery(cls, connection)
            del_query.delete_batch(pk_list)

            # Last cleanup; set NULLs where there once was a reference to the
            # object, NULL the primary key of the found objects, and perform
            # post-notification.
            for pk_val, instance in items:
                for field in cls._meta.fields:
                    if field.rel and field.null and field.rel.to in seen_objs:
                        setattr(instance, field.attname, None)

                signals.post_delete.send(sender=cls, instance=instance)
                setattr(instance, cls._meta.pk.attname, None)

        if forced_managed:
            transaction.commit()
        else:
            transaction.commit_unless_managed()
    finally:
        if forced_managed:
            transaction.leave_transaction_management()
|
def insert_query(model, values, return_id=False, raw_values=False):
    """
    Inserts a new record for the given model. This provides an interface to
    the InsertQuery class and is how Model.save() is implemented. It is not
    part of the public API.
    """
    query = sql.InsertQuery(model, connection)
    query.insert_values(values, raw_values)
    return query.execute_sql(return_id)