"""
The main QuerySet implementation. This provides the public API for the ORM.
"""

from itertools import izip

from django.db import connections, router, transaction, IntegrityError
from django.db.models.aggregates import Aggregate
from django.db.models.fields import DateField
from django.db.models.query_utils import (Q, select_related_descend,
    CollectedObjects, CyclicDependency, deferred_class_factory, InvalidQuery)
from django.db.models import signals, sql
from django.utils.copycompat import deepcopy

# Used to control how many objects are worked with at once in some cases (e.g.
# when deleting objects).
CHUNK_SIZE = 100
ITER_CHUNK_SIZE = CHUNK_SIZE

# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20

# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet

class QuerySet(object):
    """
    Represents a lazy database lookup for a set of objects.
    """
    def __init__(self, model=None, query=None, using=None):
        self.model = model
        # EmptyQuerySet instantiates QuerySet with model as None
        self._db = using
        self.query = query or sql.Query(self.model)
        self._result_cache = None
        self._iter = None
        self._sticky_filter = False
        self._for_write = False

    ########################
    # PYTHON MAGIC METHODS #
    ########################

    def __deepcopy__(self, memo):
        """
        Deep copy of a QuerySet doesn't populate the cache.
        """
        obj = self.__class__()
        for k, v in self.__dict__.items():
            if k in ('_iter', '_result_cache'):
                obj.__dict__[k] = None
            else:
                obj.__dict__[k] = deepcopy(v, memo)
        return obj

    def __getstate__(self):
        """
        Allows the QuerySet to be pickled.
        """
        # Force the cache to be fully populated.
        len(self)

        obj_dict = self.__dict__.copy()
        obj_dict['_iter'] = None
        return obj_dict

    def __repr__(self):
        data = list(self[:REPR_OUTPUT_SIZE + 1])
        if len(data) > REPR_OUTPUT_SIZE:
            data[-1] = "...(remaining elements truncated)..."
        return repr(data)

    def __len__(self):
        # Since __len__ is called quite frequently (for example, as part of
        # list(qs)), we make some effort here to be as efficient as possible
        # whilst not messing up any existing iterators against the QuerySet.
        if self._result_cache is None:
            if self._iter:
                self._result_cache = list(self._iter)
            else:
                self._result_cache = list(self.iterator())
        elif self._iter:
            self._result_cache.extend(list(self._iter))
        return len(self._result_cache)

    def __iter__(self):
        if self._result_cache is None:
            self._iter = self.iterator()
            self._result_cache = []
        if self._iter:
            return self._result_iter()
        # Python's list iterator is better than our version when we're just
        # iterating over the cache.
        return iter(self._result_cache)

    def _result_iter(self):
        pos = 0
        while 1:
            upper = len(self._result_cache)
            while pos < upper:
                yield self._result_cache[pos]
                pos = pos + 1
            if not self._iter:
                raise StopIteration
            if len(self._result_cache) <= pos:
                self._fill_cache()

    def __nonzero__(self):
        if self._result_cache is not None:
            return bool(self._result_cache)
        try:
            iter(self).next()
        except StopIteration:
            return False
        return True

    def __contains__(self, val):
        # The 'in' operator works without this method, due to __iter__. This
        # implementation exists only to shortcut the creation of Model
        # instances, by bailing out early if we find a matching element.
        pos = 0
        if self._result_cache is not None:
            if val in self._result_cache:
                return True
            elif self._iter is None:
                # iterator is exhausted, so we have our answer
                return False
            # remember not to check these again:
            pos = len(self._result_cache)
        else:
            # We need to start filling the result cache out. The following
            # ensures that self._iter is not None and self._result_cache is
            # not None.
            it = iter(self)

        # Carry on, one result at a time.
        while True:
            if len(self._result_cache) <= pos:
                self._fill_cache(num=1)
            if self._iter is None:
                # we ran out of items
                return False
            if self._result_cache[pos] == val:
                return True
            pos += 1

    def __getitem__(self, k):
        """
        Retrieves an item or slice from the set of results.
        """
        if not isinstance(k, (slice, int, long)):
            raise TypeError
        assert ((not isinstance(k, slice) and (k >= 0))
                or (isinstance(k, slice) and (k.start is None or k.start >= 0)
                    and (k.stop is None or k.stop >= 0))), \
                "Negative indexing is not supported."

        if self._result_cache is not None:
            if self._iter is not None:
                # The result cache has only been partially populated, so we may
                # need to fill it out a bit more.
                if isinstance(k, slice):
                    if k.stop is not None:
                        # Some people insist on passing in strings here.
                        bound = int(k.stop)
                    else:
                        bound = None
                else:
                    bound = k + 1
                if len(self._result_cache) < bound:
                    self._fill_cache(bound - len(self._result_cache))
            return self._result_cache[k]

        if isinstance(k, slice):
            qs = self._clone()
            if k.start is not None:
                start = int(k.start)
            else:
                start = None
            if k.stop is not None:
                stop = int(k.stop)
            else:
                stop = None
            qs.query.set_limits(start, stop)
            return k.step and list(qs)[::k.step] or qs
        try:
            qs = self._clone()
            qs.query.set_limits(k, k + 1)
            return list(qs)[0]
        except self.model.DoesNotExist, e:
            raise IndexError(e.args)
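
    # Illustrative slicing semantics (a sketch; ``Entry`` is a hypothetical
    # model). A plain slice sets LIMIT/OFFSET on the SQL and stays lazy, an
    # integer index fetches a single object, and a slice with a step
    # evaluates the query immediately:
    #
    #     qs = Entry.objects.all()
    #     page = qs[10:20]         # lazy: adds OFFSET 10 LIMIT 10
    #     every_other = qs[:10:2]  # evaluated: list(qs[:10])[::2]
    #     first = qs[0]            # single object; IndexError if no rows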

    def __and__(self, other):
        self._merge_sanity_check(other)
        if isinstance(other, EmptyQuerySet):
            return other._clone()
        combined = self._clone()
        combined.query.combine(other.query, sql.AND)
        return combined

    def __or__(self, other):
        self._merge_sanity_check(other)
        combined = self._clone()
        if isinstance(other, EmptyQuerySet):
            return combined
        combined.query.combine(other.query, sql.OR)
        return combined

    ####################################
    # METHODS THAT DO DATABASE QUERIES #
    ####################################

    def iterator(self):
        """
        An iterator over the results from applying this QuerySet to the
        database.
        """
        fill_cache = self.query.select_related
        if isinstance(fill_cache, dict):
            requested = fill_cache
        else:
            requested = None
        max_depth = self.query.max_depth

        extra_select = self.query.extra_select.keys()
        aggregate_select = self.query.aggregate_select.keys()

        only_load = self.query.get_loaded_field_names()
        if not fill_cache:
            fields = self.model._meta.fields
            pk_idx = self.model._meta.pk_index()

        index_start = len(extra_select)
        aggregate_start = index_start + len(self.model._meta.fields)

        load_fields = []
        # If only/defer clauses have been specified,
        # build the list of fields that are to be loaded.
        if only_load:
            for field, model in self.model._meta.get_fields_with_model():
                if model is None:
                    model = self.model
                if field == self.model._meta.pk:
                    # Record the index of the primary key when it is found
                    pk_idx = len(load_fields)
                try:
                    if field.name in only_load[model]:
                        # Add a field that has been explicitly included
                        load_fields.append(field.name)
                except KeyError:
                    # Model wasn't explicitly listed in the only_load table
                    # Therefore, we need to load all fields from this model
                    load_fields.append(field.name)

        skip = None
        if load_fields and not fill_cache:
            # Some fields have been deferred, so we have to initialise
            # via keyword arguments.
            skip = set()
            init_list = []
            for field in fields:
                if field.name not in load_fields:
                    skip.add(field.attname)
                else:
                    init_list.append(field.attname)
            model_cls = deferred_class_factory(self.model, skip)

        compiler = self.query.get_compiler(using=self.db)
        for row in compiler.results_iter():
            if fill_cache:
                obj, _ = get_cached_row(self.model, row,
                            index_start, using=self.db, max_depth=max_depth,
                            requested=requested, offset=len(aggregate_select),
                            only_load=only_load)
            else:
                if skip:
                    row_data = row[index_start:aggregate_start]
                    pk_val = row_data[pk_idx]
                    obj = model_cls(**dict(zip(init_list, row_data)))
                else:
                    # Omit aggregates in object creation.
                    obj = self.model(*row[index_start:aggregate_start])

            # Store the source database of the object
            obj._state.db = self.db

            for i, k in enumerate(extra_select):
                setattr(obj, k, row[i])

            # Add the aggregates to the model
            for i, aggregate in enumerate(aggregate_select):
                setattr(obj, aggregate, row[i + aggregate_start])

            yield obj
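
    # A note on iterator() (a sketch; ``Entry`` and ``process`` are
    # hypothetical): consuming this generator directly bypasses the
    # QuerySet's result cache, which keeps memory flat when walking a large
    # table exactly once:
    #
    #     for e in Entry.objects.all().iterator():
    #         process(e)  # rows become model instances as they stream in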

    def aggregate(self, *args, **kwargs):
        """
        Returns a dictionary containing the calculations (aggregation)
        over the current QuerySet.

        If args is present, each expression is passed as a kwarg using
        the Aggregate object's default alias.
        """
        for arg in args:
            kwargs[arg.default_alias] = arg

        query = self.query.clone()

        for (alias, aggregate_expr) in kwargs.items():
            query.add_aggregate(aggregate_expr, self.model, alias,
                is_summary=True)

        return query.get_aggregation(using=self.db)
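
    # Illustrative usage of aggregate() (a sketch; ``Book`` and its ``price``
    # field are hypothetical, and the result values are made up). Positional
    # Aggregate objects are keyed under their default alias:
    #
    #     from django.db.models import Avg, Max
    #     Book.objects.aggregate(Avg('price'), highest=Max('price'))
    #     # -> {'price__avg': 34.35, 'highest': Decimal('81.20')}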

    def count(self):
        """
        Performs a SELECT COUNT() and returns the number of records as an
        integer.

        If the QuerySet is already fully cached this simply returns the length
        of the cached results set to avoid multiple SELECT COUNT(*) calls.
        """
        if self._result_cache is not None and not self._iter:
            return len(self._result_cache)

        return self.query.get_count(using=self.db)

    def get(self, *args, **kwargs):
        """
        Performs the query and returns a single object matching the given
        keyword arguments.
        """
        clone = self.filter(*args, **kwargs)
        if self.query.can_filter():
            clone = clone.order_by()
        num = len(clone)
        if num == 1:
            return clone._result_cache[0]
        if not num:
            raise self.model.DoesNotExist("%s matching query does not exist."
                    % self.model._meta.object_name)
        raise self.model.MultipleObjectsReturned(
                "get() returned more than one %s -- it returned %s! Lookup parameters were %s"
                % (self.model._meta.object_name, num, kwargs))

    def create(self, **kwargs):
        """
        Creates a new object with the given kwargs, saving it to the database
        and returning the created object.
        """
        obj = self.model(**kwargs)
        self._for_write = True
        obj.save(force_insert=True, using=self.db)
        return obj

    def get_or_create(self, **kwargs):
        """
        Looks up an object with the given kwargs, creating one if necessary.
        Returns a tuple of (object, created), where created is a boolean
        specifying whether an object was created.
        """
        assert kwargs, \
                'get_or_create() must be passed at least one keyword argument'
        defaults = kwargs.pop('defaults', {})
        try:
            self._for_write = True
            return self.get(**kwargs), False
        except self.model.DoesNotExist:
            try:
                params = dict([(k, v) for k, v in kwargs.items() if '__' not in k])
                params.update(defaults)
                obj = self.model(**params)
                sid = transaction.savepoint(using=self.db)
                obj.save(force_insert=True, using=self.db)
                transaction.savepoint_commit(sid, using=self.db)
                return obj, True
            except IntegrityError, e:
                transaction.savepoint_rollback(sid, using=self.db)
                try:
                    return self.get(**kwargs), False
                except self.model.DoesNotExist:
                    raise e
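
    # Illustrative usage of get_or_create() (a sketch; ``Person`` is a
    # hypothetical model). Double-underscore lookups take part only in the
    # get() step; 'defaults' supplies extra values used when a row must be
    # created:
    #
    #     obj, created = Person.objects.get_or_create(
    #         first_name='John', last_name='Lennon',
    #         defaults={'birthday': datetime.date(1940, 10, 9)})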

    def latest(self, field_name=None):
        """
        Returns the latest object, according to the model's 'get_latest_by'
        option or optional given field_name.
        """
        latest_by = field_name or self.model._meta.get_latest_by
        assert bool(latest_by), "latest() requires either a field_name parameter or 'get_latest_by' in the model"
        assert self.query.can_filter(), \
                "Cannot change a query once a slice has been taken."
        obj = self._clone()
        obj.query.set_limits(high=1)
        obj.query.add_ordering('-%s' % latest_by)
        return obj.get()
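
    # Illustrative usage of latest() (a sketch; ``Entry`` with a ``pub_date``
    # DateField is hypothetical). Equivalent to ordering descending by the
    # field and taking the first row:
    #
    #     Entry.objects.latest('pub_date')
    #     # roughly Entry.objects.order_by('-pub_date')[0], raising
    #     # Entry.DoesNotExist when the table is empty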

    def in_bulk(self, id_list):
        """
        Returns a dictionary mapping each of the given IDs to the object with
        that ID.
        """
        assert self.query.can_filter(), \
                "Cannot use 'limit' or 'offset' with in_bulk"
        assert isinstance(id_list, (tuple, list, set, frozenset)), \
                "in_bulk() must be provided with a list of IDs."
        if not id_list:
            return {}
        qs = self._clone()
        qs.query.add_filter(('pk__in', id_list))
        return dict([(obj._get_pk_val(), obj) for obj in qs.iterator()])
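
    # Illustrative usage of in_bulk() (a sketch; ``Blog`` is a hypothetical
    # model). A single pk__in query, with results keyed by primary key:
    #
    #     Blog.objects.in_bulk([1, 2])
    #     # -> {1: <Blog: Beatles Blog>, 2: <Blog: Cheddar Talk>}
    #     Blog.objects.in_bulk([])
    #     # -> {} (no query is issued)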

    def delete(self):
        """
        Deletes the records in the current QuerySet.
        """
        assert self.query.can_filter(), \
                "Cannot use 'limit' or 'offset' with delete."

        del_query = self._clone()

        # The delete is actually 2 queries - one to find related objects,
        # and one to delete. Make sure that the discovery of related
        # objects is performed on the same database as the deletion.
        del_query._for_write = True

        # Disable non-supported fields.
        del_query.query.select_related = False
        del_query.query.clear_ordering()

        # Delete objects in chunks to prevent the list of related objects from
        # becoming too long.
        seen_objs = None
        del_itr = iter(del_query)
        while 1:
            # Collect a chunk of objects to be deleted, and then all the
            # objects that are related to the objects that are to be deleted.
            # The chunking *isn't* done by slicing the del_query because we
            # need to maintain the query cache on del_query (see #12328)
            seen_objs = CollectedObjects(seen_objs)
            for i, obj in izip(xrange(CHUNK_SIZE), del_itr):
                obj._collect_sub_objects(seen_objs)

            if not seen_objs:
                break
            delete_objects(seen_objs, del_query.db)

        # Clear the result cache, in case this QuerySet gets reused.
        self._result_cache = None
    delete.alters_data = True

    def update(self, **kwargs):
        """
        Updates all elements in the current QuerySet, setting all the given
        fields to the appropriate values.
        """
        assert self.query.can_filter(), \
                "Cannot update a query once a slice has been taken."
        self._for_write = True
        query = self.query.clone(sql.UpdateQuery)
        query.add_update_values(kwargs)
        if not transaction.is_managed(using=self.db):
            transaction.enter_transaction_management(using=self.db)
            forced_managed = True
        else:
            forced_managed = False
        try:
            rows = query.get_compiler(self.db).execute_sql(None)
            if forced_managed:
                transaction.commit(using=self.db)
            else:
                transaction.commit_unless_managed(using=self.db)
        finally:
            if forced_managed:
                transaction.leave_transaction_management(using=self.db)
        self._result_cache = None
        return rows
    update.alters_data = True
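
    # Illustrative usage of update() (a sketch; ``Entry`` is a hypothetical
    # model). The update runs as a single SQL UPDATE and returns the number
    # of affected rows; per-instance save() methods are not invoked:
    #
    #     Entry.objects.filter(pub_date__year=2009).update(comments_on=False)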

    def _update(self, values):
        """
        A version of update that accepts field objects instead of field names.
        Used primarily for model saving and not intended for use by general
        code (it requires too much poking around at model internals to be
        useful at that level).
        """
        assert self.query.can_filter(), \
                "Cannot update a query once a slice has been taken."
        query = self.query.clone(sql.UpdateQuery)
        query.add_update_fields(values)
        self._result_cache = None
        return query.get_compiler(self.db).execute_sql(None)
    _update.alters_data = True

    def exists(self):
        if self._result_cache is None:
            return self.query.has_results(using=self.db)
        return bool(self._result_cache)

    ##################################################
    # PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
    ##################################################

    def values(self, *fields):
        return self._clone(klass=ValuesQuerySet, setup=True, _fields=fields)

    def values_list(self, *fields, **kwargs):
        flat = kwargs.pop('flat', False)
        if kwargs:
            raise TypeError('Unexpected keyword arguments to values_list: %s'
                    % (kwargs.keys(),))
        if flat and len(fields) > 1:
            raise TypeError("'flat' is not valid when values_list is called with more than one field.")
        return self._clone(klass=ValuesListQuerySet, setup=True, flat=flat,
                _fields=fields)
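
    # Illustrative usage of values() and values_list() (a sketch; ``Entry``
    # is a hypothetical model):
    #
    #     Entry.objects.values('id', 'headline')
    #     # -> [{'id': 1, 'headline': u'First'}, ...] (dictionaries)
    #     Entry.objects.values_list('id', 'headline')
    #     # -> [(1, u'First'), ...] (tuples)
    #     Entry.objects.values_list('id', flat=True)
    #     # -> [1, 2, 3, ...] (flat only works for a single field)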

    def dates(self, field_name, kind, order='ASC'):
        """
        Returns a list of datetime objects representing all available dates for
        the given field_name, scoped to 'kind'.
        """
        assert kind in ("month", "year", "day"), \
                "'kind' must be one of 'year', 'month' or 'day'."
        assert order in ('ASC', 'DESC'), \
                "'order' must be either 'ASC' or 'DESC'."
        return self._clone(klass=DateQuerySet, setup=True,
                _field_name=field_name, _kind=kind, _order=order)
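
    # Illustrative usage of dates() (a sketch; ``Entry.pub_date`` is a
    # hypothetical DateField). 'kind' controls the truncation level:
    #
    #     Entry.objects.dates('pub_date', 'year')
    #     # -> [datetime.datetime(2009, 1, 1, 0, 0), ...]
    #     Entry.objects.dates('pub_date', 'month', order='DESC')
    #     # -> one value per distinct year/month, newest first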

    def none(self):
        """
        Returns an empty QuerySet.
        """
        return self._clone(klass=EmptyQuerySet)

    ##################################################################
    # PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
    ##################################################################

    def all(self):
        """
        Returns a new QuerySet that is a copy of the current one. This allows a
        QuerySet to proxy for a model manager in some cases.
        """
        return self._clone()

    def filter(self, *args, **kwargs):
        """
        Returns a new QuerySet instance with the args ANDed to the existing
        set.
        """
        return self._filter_or_exclude(False, *args, **kwargs)

    def exclude(self, *args, **kwargs):
        """
        Returns a new QuerySet instance with NOT (args) ANDed to the existing
        set.
        """
        return self._filter_or_exclude(True, *args, **kwargs)

    def _filter_or_exclude(self, negate, *args, **kwargs):
        if args or kwargs:
            assert self.query.can_filter(), \
                    "Cannot filter a query once a slice has been taken."

        clone = self._clone()
        if negate:
            clone.query.add_q(~Q(*args, **kwargs))
        else:
            clone.query.add_q(Q(*args, **kwargs))
        return clone

    def complex_filter(self, filter_obj):
        """
        Returns a new QuerySet instance with filter_obj added to the filters.

        filter_obj can be a Q object (or anything with an add_to_query()
        method) or a dictionary of keyword lookup arguments.

        This exists to support framework features such as 'limit_choices_to',
        and usually it will be more natural to use other methods.
        """
        if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
            clone = self._clone()
            clone.query.add_q(filter_obj)
            return clone
        else:
            return self._filter_or_exclude(None, **filter_obj)

    def select_related(self, *fields, **kwargs):
        """
        Returns a new QuerySet instance that will select related objects.

        If fields are specified, they must be ForeignKey fields and only those
        related objects are included in the selection.
        """
        depth = kwargs.pop('depth', 0)
        if kwargs:
            raise TypeError('Unexpected keyword arguments to select_related: %s'
                    % (kwargs.keys(),))
        obj = self._clone()
        if fields:
            if depth:
                raise TypeError('Cannot pass both "depth" and fields to select_related()')
            obj.query.add_select_related(fields)
        else:
            obj.query.select_related = True
            if depth:
                obj.query.max_depth = depth
        return obj
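
    # Illustrative usage of select_related() (a sketch; ``Entry`` with a
    # ``blog`` ForeignKey is hypothetical). Related rows are fetched in the
    # same SQL query via joins, so the attribute access below hits the cache:
    #
    #     e = Entry.objects.select_related('blog').get(id=5)
    #     e.blog                                  # no extra query
    #     Entry.objects.select_related(depth=1)   # follow all FKs one level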

    def dup_select_related(self, other):
        """
        Copies the related selection status from the QuerySet 'other' to the
        current QuerySet.
        """
        self.query.select_related = other.query.select_related

    def annotate(self, *args, **kwargs):
        """
        Return a query set in which the returned objects have been annotated
        with data aggregated from related fields.
        """
        for arg in args:
            kwargs[arg.default_alias] = arg

        obj = self._clone()

        obj._setup_aggregate_query(kwargs.keys())

        # Add the aggregates to the query
        for (alias, aggregate_expr) in kwargs.items():
            obj.query.add_aggregate(aggregate_expr, self.model, alias,
                is_summary=False)

        return obj
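
    # Illustrative usage of annotate() (a sketch; ``Blog`` with a reverse
    # ``entry`` relation is hypothetical). Unlike aggregate(), this attaches
    # one value per object rather than summarising the whole QuerySet:
    #
    #     from django.db.models import Count
    #     blogs = Blog.objects.annotate(num_entries=Count('entry'))
    #     blogs[0].num_entries  # per-object aggregate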

    def order_by(self, *field_names):
        """
        Returns a new QuerySet instance with the ordering changed.
        """
        assert self.query.can_filter(), \
                "Cannot reorder a query once a slice has been taken."
        obj = self._clone()
        obj.query.clear_ordering()
        obj.query.add_ordering(*field_names)
        return obj

    def distinct(self, true_or_false=True):
        """
        Returns a new QuerySet instance that will select only distinct results.
        """
        obj = self._clone()
        obj.query.distinct = true_or_false
        return obj

    def extra(self, select=None, where=None, params=None, tables=None,
              order_by=None, select_params=None):
        """
        Adds extra SQL fragments to the query.
        """
        assert self.query.can_filter(), \
                "Cannot change a query once a slice has been taken"
        clone = self._clone()
        clone.query.add_extra(select, select_params, where, params, tables, order_by)
        return clone
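
    # Illustrative usage of extra() (a sketch; ``Entry`` is a hypothetical
    # model). Raw fragments are spliced into the generated SQL, so 'params'
    # should carry any user-supplied values:
    #
    #     Entry.objects.extra(
    #         select={'is_recent': "pub_date > '2006-01-01'"},
    #         where=['headline = %s'], params=['Lennon'])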

    def reverse(self):
        """
        Reverses the ordering of the QuerySet.
        """
        clone = self._clone()
        clone.query.standard_ordering = not clone.query.standard_ordering
        return clone

    def defer(self, *fields):
        """
        Defers the loading of data for certain fields until they are accessed.
        The set of fields to defer is added to any existing set of deferred
        fields. The only exception to this is if None is passed in as the only
        parameter, in which case all deferrals are removed (None acts as a
        reset option).
        """
        clone = self._clone()
        if fields == (None,):
            clone.query.clear_deferred_loading()
        else:
            clone.query.add_deferred_loading(fields)
        return clone

    def only(self, *fields):
        """
        Essentially, the opposite of defer. Only the fields passed into this
        method and that are not already specified as deferred are loaded
        immediately when the queryset is evaluated.
        """
        if fields == (None,):
            # Can only pass None to defer(), not only(), as the reset option.
            # That won't stop people trying to do this, so let's be explicit.
            raise TypeError("Cannot pass None as an argument to only().")
        clone = self._clone()
        clone.query.add_immediate_loading(fields)
        return clone
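
    # Illustrative usage of defer() and only() (a sketch; ``Entry`` is a
    # hypothetical model). Deferred columns are loaded lazily on first access:
    #
    #     Entry.objects.defer('body')      # everything except body
    #     Entry.objects.only('headline')   # just headline (plus the pk)
    #     Entry.objects.defer(None)        # clear all deferrals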

    def using(self, alias):
        """
        Selects which database this QuerySet should execute its query against.
        """
        clone = self._clone()
        clone._db = alias
        return clone
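
    # Illustrative usage of using() (a sketch; the 'backup' alias is assumed
    # to be defined in settings.DATABASES):
    #
    #     Entry.objects.using('backup').count()  # runs against 'backup'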

    ###################################
    # PUBLIC INTROSPECTION ATTRIBUTES #
    ###################################

    def ordered(self):
        """
        Returns True if the QuerySet is ordered -- i.e. has an order_by()
        clause or a default ordering on the model.
        """
        if self.query.extra_order_by or self.query.order_by:
            return True
        elif self.query.default_ordering and self.query.model._meta.ordering:
            return True
        else:
            return False
    ordered = property(ordered)

    @property
    def db(self):
        "Return the database that will be used if this query is executed now"
        if self._for_write:
            return self._db or router.db_for_write(self.model)
        return self._db or router.db_for_read(self.model)

    ###################
    # PRIVATE METHODS #
    ###################

    def _clone(self, klass=None, setup=False, **kwargs):
        if klass is None:
            klass = self.__class__
        query = self.query.clone()
        if self._sticky_filter:
            query.filter_is_sticky = True
        c = klass(model=self.model, query=query, using=self._db)
        c._for_write = self._for_write
        c.__dict__.update(kwargs)
        if setup and hasattr(c, '_setup_query'):
            c._setup_query()
        return c

    def _fill_cache(self, num=None):
        """
        Fills the result cache with 'num' more entries (or until the results
        iterator is exhausted).
        """
        if self._iter:
            try:
                for i in range(num or ITER_CHUNK_SIZE):
                    self._result_cache.append(self._iter.next())
            except StopIteration:
                self._iter = None

    def _next_is_sticky(self):
        """
        Indicates that the next filter call and the one following that should
        be treated as a single filter. This is only important when it comes to
        determining when to reuse tables for many-to-many filters. Required so
        that we can filter naturally on the results of related managers.

        This doesn't return a clone of the current QuerySet (it returns
        "self"). The method is only used internally and should be immediately
        followed by a filter() that does create a clone.
        """
        self._sticky_filter = True
        return self

    def _merge_sanity_check(self, other):
        """
        Checks that we are merging two comparable QuerySet classes. By default
        this does nothing, but see the ValuesQuerySet for an example of where
        it's useful.
        """
        pass

    def _setup_aggregate_query(self, aggregates):
        """
        Prepare the query for computing a result that contains aggregate annotations.
        """
        opts = self.model._meta
        if self.query.group_by is None:
            field_names = [f.attname for f in opts.fields]
            self.query.add_fields(field_names, False)
            self.query.set_group_by()

    def _prepare(self):
        return self

    def _as_sql(self, connection):
        """
        Returns the internal query's SQL and parameters (as a tuple).
        """
        obj = self.values("pk")
        if obj._db is None or connection == connections[obj._db]:
            return obj.query.get_compiler(connection=connection).as_nested_sql()
        raise ValueError("Can't do subqueries with queries on different DBs.")

    # When used as part of a nested query, a queryset will never be an "always
    # empty" result.
    value_annotation = True

class ValuesQuerySet(QuerySet):
    def __init__(self, *args, **kwargs):
        super(ValuesQuerySet, self).__init__(*args, **kwargs)
        # select_related isn't supported in values(). (FIXME - #3358)
        self.query.select_related = False

        # QuerySet.clone() will also set up the _fields attribute with the
        # names of the model fields to select.

    def iterator(self):
        # Purge any extra columns that haven't been explicitly asked for
        extra_names = self.query.extra_select.keys()
        field_names = self.field_names
        aggregate_names = self.query.aggregate_select.keys()

        names = extra_names + field_names + aggregate_names

        for row in self.query.get_compiler(self.db).results_iter():
            yield dict(zip(names, row))

    def _setup_query(self):
        """
        Constructs the field_names list that the values query will be
        retrieving.

        Called by the _clone() method after initializing the rest of the
        instance.
        """
        self.query.clear_deferred_loading()
        self.query.clear_select_fields()

        if self._fields:
            self.extra_names = []
            self.aggregate_names = []
            if not self.query.extra and not self.query.aggregates:
                # Short cut - if there are no extra or aggregates, then
                # the values() clause must be just field names.
                self.field_names = list(self._fields)
            else:
                self.query.default_cols = False
                self.field_names = []
                for f in self._fields:
                    # we inspect the full extra_select list since we might
                    # be adding back an extra select item that we hadn't
                    # had selected previously.
                    if self.query.extra.has_key(f):
                        self.extra_names.append(f)
                    elif self.query.aggregate_select.has_key(f):
                        self.aggregate_names.append(f)
                    else:
                        self.field_names.append(f)
        else:
            # Default to all fields.
            self.extra_names = None
            self.field_names = [f.attname for f in self.model._meta.fields]
            self.aggregate_names = None

        self.query.select = []
        if self.extra_names is not None:
            self.query.set_extra_mask(self.extra_names)
        self.query.add_fields(self.field_names, False)
        if self.aggregate_names is not None:
            self.query.set_aggregate_mask(self.aggregate_names)

    def _clone(self, klass=None, setup=False, **kwargs):
        """
        Cloning a ValuesQuerySet preserves the current fields.
        """
        c = super(ValuesQuerySet, self)._clone(klass, **kwargs)
        if not hasattr(c, '_fields'):
            # Only clone self._fields if _fields wasn't passed into the cloning
            # call directly.
            c._fields = self._fields[:]
        c.field_names = self.field_names
        c.extra_names = self.extra_names
        c.aggregate_names = self.aggregate_names
        if setup and hasattr(c, '_setup_query'):
            c._setup_query()
        return c

    def _merge_sanity_check(self, other):
        super(ValuesQuerySet, self)._merge_sanity_check(other)
        if (set(self.extra_names) != set(other.extra_names) or
                set(self.field_names) != set(other.field_names) or
                self.aggregate_names != other.aggregate_names):
            raise TypeError("Merging '%s' classes must involve the same values in each case."
                    % self.__class__.__name__)

    def _setup_aggregate_query(self, aggregates):
        """
        Prepare the query for computing a result that contains aggregate annotations.
        """
        self.query.set_group_by()

        if self.aggregate_names is not None:
            self.aggregate_names.extend(aggregates)
            self.query.set_aggregate_mask(self.aggregate_names)

        super(ValuesQuerySet, self)._setup_aggregate_query(aggregates)

    def _as_sql(self, connection):
        """
        A ValuesQuerySet (and subclasses such as ValuesListQuerySet) can only
        be used as a nested query if it is already set up to select a single
        field (in which case, that is the field column that is returned). This
        differs from QuerySet.as_sql(), where the column to select is set up
        by Django.
        """
        if ((self._fields and len(self._fields) > 1) or
                (not self._fields and len(self.model._meta.fields) > 1)):
            raise TypeError('Cannot use a multi-field %s as a filter value.'
                    % self.__class__.__name__)

        obj = self._clone()
        if obj._db is None or connection == connections[obj._db]:
            return obj.query.get_compiler(connection=connection).as_nested_sql()
        raise ValueError("Can't do subqueries with queries on different DBs.")

    def _prepare(self):
        """
        Validates that we aren't trying to do a query like
        value__in=qs.values('value1', 'value2'), which isn't valid.
        """
        if ((self._fields and len(self._fields) > 1) or
                (not self._fields and len(self.model._meta.fields) > 1)):
            raise TypeError('Cannot use a multi-field %s as a filter value.'
                    % self.__class__.__name__)
        return self

class ValuesListQuerySet(ValuesQuerySet):
    def iterator(self):
        if self.flat and len(self._fields) == 1:
            for row in self.query.get_compiler(self.db).results_iter():
                yield row[0]
        elif not self.query.extra_select and not self.query.aggregate_select:
            for row in self.query.get_compiler(self.db).results_iter():
                yield tuple(row)
        else:
            # When extra(select=...) or an annotation is involved, the extra
            # cols are always at the start of the row, and we need to reorder
            # the fields to match the order in self._fields.
            extra_names = self.query.extra_select.keys()
            field_names = self.field_names
            aggregate_names = self.query.aggregate_select.keys()

            names = extra_names + field_names + aggregate_names

            # If a field list has been specified, use it. Otherwise, use the
            # full list of fields, including extras and aggregates.
            if self._fields:
                fields = list(self._fields) + filter(
                        lambda f: f not in self._fields, aggregate_names)
            else:
                fields = names

            for row in self.query.get_compiler(self.db).results_iter():
                data = dict(zip(names, row))
                yield tuple([data[f] for f in fields])

    def _clone(self, *args, **kwargs):
        clone = super(ValuesListQuerySet, self)._clone(*args, **kwargs)
        clone.flat = self.flat
        return clone


class DateQuerySet(QuerySet):
    def iterator(self):
        return self.query.get_compiler(self.db).results_iter()

    def _setup_query(self):
        """
        Sets up any special features of the query attribute.

        Called by the _clone() method after initializing the rest of the
        instance.
        """
        self.query.clear_deferred_loading()
        self.query = self.query.clone(klass=sql.DateQuery, setup=True)
        self.query.select = []
        field = self.model._meta.get_field(self._field_name, many_to_many=False)
        assert isinstance(field, DateField), "%r isn't a DateField." \
                % field.name
        self.query.add_date_select(field, self._kind, self._order)
        if field.null:
            self.query.add_filter(('%s__isnull' % field.name, False))

    def _clone(self, klass=None, setup=False, **kwargs):
        c = super(DateQuerySet, self)._clone(klass, False, **kwargs)
        c._field_name = self._field_name
        c._kind = self._kind
        if setup and hasattr(c, '_setup_query'):
            c._setup_query()
        return c


class EmptyQuerySet(QuerySet):
    def __init__(self, model=None, query=None, using=None):
        super(EmptyQuerySet, self).__init__(model, query, using)
        self._result_cache = []

    def __and__(self, other):
        return self._clone()

    def __or__(self, other):
        return other._clone()

    def count(self):
        return 0

    def delete(self):
        pass

    def _clone(self, klass=None, setup=False, **kwargs):
        c = super(EmptyQuerySet, self)._clone(klass, **kwargs)
        c._result_cache = []
        return c

    def iterator(self):
        # This slightly odd construction is because we need an empty generator
        # (it raises StopIteration immediately).
        yield iter([]).next()

    def all(self):
        """
        Always returns EmptyQuerySet.
        """
        return self

    def filter(self, *args, **kwargs):
        """
        Always returns EmptyQuerySet.
        """
        return self

    def exclude(self, *args, **kwargs):
        """
        Always returns EmptyQuerySet.
        """
        return self

    def complex_filter(self, filter_obj):
        """
        Always returns EmptyQuerySet.
        """
        return self

    def select_related(self, *fields, **kwargs):
        """
        Always returns EmptyQuerySet.
        """
        return self

    def annotate(self, *args, **kwargs):
        """
        Always returns EmptyQuerySet.
        """
        return self

    def order_by(self, *field_names):
        """
        Always returns EmptyQuerySet.
        """
        return self

    def distinct(self, true_or_false=True):
        """
        Always returns EmptyQuerySet.
        """
        return self

    def extra(self, select=None, where=None, params=None, tables=None,
              order_by=None, select_params=None):
        """
        Always returns EmptyQuerySet.
        """
        assert self.query.can_filter(), \
                "Cannot change a query once a slice has been taken"
        return self

    def reverse(self):
        """
        Always returns EmptyQuerySet.
        """
        return self

    def defer(self, *fields):
        """
        Always returns EmptyQuerySet.
        """
        return self

    def only(self, *fields):
        """
        Always returns EmptyQuerySet.
        """
        return self

    def update(self, **kwargs):
        """
        Don't update anything.
        """
        return 0

    # EmptyQuerySet is always an empty result in where-clauses (and similar
    # situations).
    value_annotation = False


def get_cached_row(klass, row, index_start, using, max_depth=0, cur_depth=0,
                   requested=None, offset=0, only_load=None, local_only=False):
    """
    Helper function that recursively returns an object with the specified
    related attributes already populated.

    This method may be called recursively to populate deep select_related()
    clauses.

    Arguments:
     * klass - the class to retrieve (and instantiate)
     * row - the row of data returned by the database cursor
     * index_start - the index of the row at which data for this
       object is known to start
     * using - the database alias on which the query is being executed.
     * max_depth - the maximum depth to which a select_related()
       relationship should be explored.
     * cur_depth - the current depth in the select_related() tree.
       Used in recursive calls to determine if we should dig deeper.
     * requested - A dictionary describing the select_related() tree
       that is to be retrieved. keys are field names; values are
       dictionaries describing the keys on that related object that
       are themselves to be select_related().
     * offset - the number of additional fields that are known to
       exist in `row` for `klass`. This usually means the number of
       annotated results on `klass`.
     * only_load - if the query has had only() or defer() applied,
       this is the list of field names that will be returned. If None,
       the full field list for `klass` can be assumed.
     * local_only - Only populate local fields. This is used when
       following reverse select-related relations.
    """
    if max_depth and requested is None and cur_depth > max_depth:
        # We've recursed deeply enough; stop now.
        return None

    restricted = requested is not None
    if only_load:
        load_fields = only_load.get(klass)
        # When we create the object, we will also be populating all the
        # parent classes, so traverse the parent classes looking for fields
        # that must be included on load.
        for parent in klass._meta.get_parent_list():
            fields = only_load.get(parent)
            if fields:
                load_fields.update(fields)
    else:
        load_fields = None
    if load_fields:
        # Handle deferred fields.
        skip = set()
        init_list = []
        # Build the list of fields that *haven't* been requested
        for field, model in klass._meta.get_fields_with_model():
            if field.name not in load_fields:
                skip.add(field.name)
            elif local_only and model is not None:
                continue
            else:
                init_list.append(field.attname)
        # Retrieve all the requested fields
        field_count = len(init_list)
        fields = row[index_start : index_start + field_count]
        # If all the select_related columns are None, then the related
        # object must be non-existent - set the relation to None.
        # Otherwise, construct the related object.
        if fields == (None,) * field_count:
            obj = None
        elif skip:
            klass = deferred_class_factory(klass, skip)
            obj = klass(**dict(zip(init_list, fields)))
        else:
            obj = klass(*fields)

    else:
        # Load all fields on klass
        if local_only:
            field_names = [f.attname for f in klass._meta.local_fields]
        else:
            field_names = [f.attname for f in klass._meta.fields]
        field_count = len(field_names)
        fields = row[index_start : index_start + field_count]
        # If all the select_related columns are None, then the related
        # object must be non-existent - set the relation to None.
        # Otherwise, construct the related object.
        if fields == (None,) * field_count:
            obj = None
        else:
            obj = klass(**dict(zip(field_names, fields)))

    # If an object was retrieved, set the database state.
    if obj:
        obj._state.db = using

    index_end = index_start + field_count + offset
    # Iterate over each related object, populating any
    # select_related() fields
    for f in klass._meta.fields:
        if not select_related_descend(f, restricted, requested):
            continue
        if restricted:
            next = requested[f.name]
        else:
            next = None
        # Recursively retrieve the data for the related object
        cached_row = get_cached_row(f.rel.to, row, index_end, using,
                max_depth, cur_depth+1, next, only_load=only_load)
        # If the recursive descent found an object, populate the
        # descriptor caches relevant to the object
        if cached_row:
            rel_obj, index_end = cached_row
            if obj is not None:
                # If the base object exists, populate the
                # descriptor cache
                setattr(obj, f.get_cache_name(), rel_obj)
            if f.unique and rel_obj is not None:
                # If the field is unique, populate the
                # reverse descriptor cache on the related object
                setattr(rel_obj, f.related.get_cache_name(), obj)

    # Now do the same, but for reverse related objects.
    # Only handle the restricted case - i.e., don't do a depth
    # descent into reverse relations unless explicitly requested
    if restricted:
        related_fields = [
            (o.field, o.model)
            for o in klass._meta.get_all_related_objects()
            if o.field.unique
        ]
        for f, model in related_fields:
            if not select_related_descend(f, restricted, requested, reverse=True):
                continue
            next = requested[f.related_query_name()]
            # Recursively retrieve the data for the related object
            cached_row = get_cached_row(model, row, index_end, using,
                    max_depth, cur_depth+1, next, only_load=only_load,
                    local_only=True)
            # If the recursive descent found an object, populate the
            # descriptor caches relevant to the object
            if cached_row:
                rel_obj, index_end = cached_row
                if obj is not None:
                    # If the field is unique, populate the
                    # reverse descriptor cache
                    setattr(obj, f.related.get_cache_name(), rel_obj)
                if rel_obj is not None:
                    # If the related object exists, populate
                    # the descriptor cache.
                    setattr(rel_obj, f.get_cache_name(), obj)
                    # Now populate all the non-local field values
                    # on the related object
                    for rel_field, rel_model in rel_obj._meta.get_fields_with_model():
                        if rel_model is not None:
                            setattr(rel_obj, rel_field.attname,
                                    getattr(obj, rel_field.attname))
                            # populate the field cache for any related object
                            # that has already been retrieved
                            if rel_field.rel:
                                try:
                                    cached_obj = getattr(obj, rel_field.get_cache_name())
                                    setattr(rel_obj, rel_field.get_cache_name(), cached_obj)
                                except AttributeError:
                                    # Related object hasn't been cached yet
                                    pass
    return obj, index_end

def delete_objects(seen_objs, using):
    """
    Iterate through a list of seen classes, and remove any instances that are
    referred to.
    """
    connection = connections[using]
    if not transaction.is_managed(using=using):
        transaction.enter_transaction_management(using=using)
        forced_managed = True
    else:
        forced_managed = False
    try:
        ordered_classes = seen_objs.keys()
    except CyclicDependency:
        # If there is a cyclic dependency, we cannot in general delete the
        # objects. However, if an appropriate transaction is set up, or if the
        # database is lax enough, it will succeed. So for now, we go ahead and
        # try anyway.
        ordered_classes = seen_objs.unordered_keys()

    obj_pairs = {}
    try:
        for cls in ordered_classes:
            items = seen_objs[cls].items()
            items.sort()
            obj_pairs[cls] = items

            # Pre-notify all instances to be deleted.
            for pk_val, instance in items:
                if not cls._meta.auto_created:
                    signals.pre_delete.send(sender=cls, instance=instance)

            pk_list = [pk for pk, instance in items]

            update_query = sql.UpdateQuery(cls)
            for field, model in cls._meta.get_fields_with_model():
                if (field.rel and field.null and field.rel.to in seen_objs and
                        filter(lambda f: f.column == field.rel.get_related_field().column,
                            field.rel.to._meta.fields)):
                    if model:
                        sql.UpdateQuery(model).clear_related(field, pk_list, using=using)
                    else:
                        update_query.clear_related(field, pk_list, using=using)

        # Now delete the actual data.
        for cls in ordered_classes:
            items = obj_pairs[cls]
            items.reverse()

            pk_list = [pk for pk, instance in items]
            del_query = sql.DeleteQuery(cls)
            del_query.delete_batch(pk_list, using=using)

            # Last cleanup; set NULLs where there once was a reference to the
            # object, NULL the primary key of the found objects, and perform
            # post-notification.
            for pk_val, instance in items:
                for field in cls._meta.fields:
                    if field.rel and field.null and field.rel.to in seen_objs:
                        setattr(instance, field.attname, None)

                if not cls._meta.auto_created:
                    signals.post_delete.send(sender=cls, instance=instance)
                setattr(instance, cls._meta.pk.attname, None)

        if forced_managed:
            transaction.commit(using=using)
        else:
            transaction.commit_unless_managed(using=using)
    finally:
        if forced_managed:
            transaction.leave_transaction_management(using=using)

class RawQuerySet(object):
    """
    Provides an iterator which converts the results of raw SQL queries into
    annotated model instances.
    """
    def __init__(self, raw_query, model=None, query=None, params=None,
                 translations=None, using=None):
        self.raw_query = raw_query
        self.model = model
        self._db = using
        self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
        self.params = params or ()
        self.translations = translations or {}
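
    # Illustrative usage (a sketch; ``Person`` is a hypothetical model).
    # RawQuerySet instances are normally obtained through Manager.raw()
    # rather than constructed directly:
    #
    #     for p in Person.objects.raw('SELECT * FROM myapp_person'):
    #         print p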

    def __iter__(self):
        for row in self.query:
            yield self.transform_results(row)

    def __repr__(self):
        return "<RawQuerySet: %r>" % (self.raw_query % self.params)

    def __getitem__(self, k):
        return list(self)[k]

    @property
    def db(self):
        "Return the database that will be used if this query is executed now"
        return self._db or router.db_for_read(self.model)

    def using(self, alias):
        """
        Selects which database this RawQuerySet should execute its query
        against.
        """
        return RawQuerySet(self.raw_query, model=self.model,
                query=self.query.clone(using=alias),
                params=self.params, translations=self.translations,
                using=alias)

    @property
    def columns(self):
        """
        A list of model field names in the order they'll appear in the
        query results.
        """
        if not hasattr(self, '_columns'):
            self._columns = self.query.get_columns()

            # Adjust any column names which don't match field names
            for (query_name, model_name) in self.translations.items():
                try:
                    index = self._columns.index(query_name)
                    self._columns[index] = model_name
                except ValueError:
                    # Ignore translations for non-existent column names
                    pass

        return self._columns

    @property
    def model_fields(self):
        """
        A dict mapping column names to model fields.
        """
        if not hasattr(self, '_model_fields'):
            converter = connections[self.db].introspection.table_name_converter
            self._model_fields = {}
            for field in self.model._meta.fields:
                name, column = field.get_attname_column()
                self._model_fields[converter(column)] = field
        return self._model_fields

    def transform_results(self, values):
        model_init_kwargs = {}
        annotations = ()

        # Perform database backend type resolution
        connection = connections[self.db]
        compiler = connection.ops.compiler('SQLCompiler')(self.query, connection, self.db)
        if hasattr(compiler, 'resolve_columns'):
            fields = [self.model_fields.get(c, None) for c in self.columns]
            values = compiler.resolve_columns(values, fields)

        # Associate fields to values
        for pos, value in enumerate(values):
            column = self.columns[pos]

            # Separate properties from annotations
            if column in self.model_fields.keys():
                model_init_kwargs[self.model_fields[column].attname] = value
            else:
                annotations += (column, value),

        # Construct model instance and apply annotations
        skip = set()
        for field in self.model._meta.fields:
            if field.attname not in model_init_kwargs.keys():
                skip.add(field.attname)

        if skip:
            if self.model._meta.pk.attname in skip:
                raise InvalidQuery('Raw query must include the primary key')
            model_cls = deferred_class_factory(self.model, skip)
        else:
            model_cls = self.model

        instance = model_cls(**model_init_kwargs)

        for field, value in annotations:
            setattr(instance, field, value)

        instance._state.db = self.query.using

        return instance

def insert_query(model, values, return_id=False, raw_values=False, using=None):
    """
    Inserts a new record for the given model. This provides an interface to
    the InsertQuery class and is how Model.save() is implemented. It is not
    part of the public API.
    """
    query = sql.InsertQuery(model)
    query.insert_values(values, raw_values)
    return query.get_compiler(using=using).execute_sql(return_id)