"""
Create SQL statements for QuerySets.

The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""

from copy import deepcopy

from django.utils.tree import Node
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_unicode
from django.db.backends.util import truncate_name
from django.db import connection
from django.db.models import signals
from django.db.models.fields import FieldDoesNotExist
from django.db.models.query_utils import select_related_descend
from django.db.models.sql import aggregates as base_aggregates_module
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.where import WhereNode, Constraint, EverythingNode, AND, OR
from django.core.exceptions import FieldError
from datastructures import EmptyResultSet, Empty, MultiJoin
from constants import *

try:
    set
except NameError:
    from sets import Set as set    # Python 2.3 fallback

__all__ = ['Query', 'BaseQuery']

class BaseQuery(object):
    """
    A single SQL query.
    """
    # SQL join types. These are part of the class because their string forms
    # vary from database to database and can be customised by a subclass.
    INNER = 'INNER JOIN'
    LOUTER = 'LEFT OUTER JOIN'

    alias_prefix = 'T'
    query_terms = QUERY_TERMS
    aggregates_module = base_aggregates_module

    def __init__(self, model, connection, where=WhereNode):
        self.model = model
        self.connection = connection
        self.alias_refcount = {}
        self.alias_map = {}     # Maps alias to join information
        self.table_map = {}     # Maps table names to list of aliases.
        self.join_map = {}
        self.rev_join_map = {}  # Reverse of join_map.
        self.quote_cache = {}
        self.default_cols = True
        self.default_ordering = True
        self.standard_ordering = True
        self.ordering_aliases = []
        self.select_fields = []
        self.related_select_fields = []
        self.dupe_avoidance = {}
        self.used_aliases = set()
        self.filter_is_sticky = False
        self.included_inherited_models = {}

        # SQL-related attributes
        self.select = []
        self.tables = []    # Aliases in the order they are created.
        self.where = where()
        self.where_class = where
        self.group_by = None
        self.having = where()
        self.order_by = []
        self.low_mark, self.high_mark = 0, None  # Used for offset/limit
        self.distinct = False
        self.select_related = False
        self.related_select_cols = []

        # SQL aggregate-related attributes
        self.aggregates = SortedDict()  # Maps alias -> SQL aggregate function
        self.aggregate_select_mask = None
        self._aggregate_select_cache = None

        # Arbitrary maximum limit for select_related. Prevents infinite
        # recursion. Can be changed by the depth parameter to select_related().
        self.max_depth = 5

        # These are for extensions. The contents are more or less appended
        # verbatim to the appropriate clause.
        self.extra = SortedDict()  # Maps col_alias -> (col_sql, params).
        self.extra_select_mask = None
        self._extra_select_cache = None

        self.extra_tables = ()
        self.extra_where = ()
        self.extra_params = ()
        self.extra_order_by = ()

        # A tuple that is a set of model field names and either True, if these
        # are the fields to defer, or False if these are the only fields to
        # load.
        self.deferred_loading = (set(), True)

    def __str__(self):
        """
        Returns the query as a string of SQL with the parameter values
        substituted in.

        Parameter values won't necessarily be quoted correctly, since that is
        done by the database interface at execution time.
        """
        sql, params = self.as_sql()
        return sql % params
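
    # Illustrative sketch (not part of the original file, assuming a
    # hypothetical Author model): __str__ interpolates the parameters into
    # the SQL for debugging purposes only:
    #
    #   query = BaseQuery(Author, connection)
    #   query.add_filter(('name__exact', 'fred'))
    #   print str(query)  # e.g. SELECT ... WHERE "app_author"."name" = fred
    #
    # Note the missing quoting around 'fred' -- real quoting happens in the
    # database interface, so str() output is only suitable for debugging.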
|
    def __deepcopy__(self, memo):
        result = self.clone()
        memo[id(self)] = result
        return result

    def __getstate__(self):
        """
        Pickling support.
        """
        obj_dict = self.__dict__.copy()
        obj_dict['related_select_fields'] = []
        obj_dict['related_select_cols'] = []
        del obj_dict['connection']

        # Fields can't be pickled, so if a field list has been
        # specified, we pickle the list of field names instead.
        # None is also a possible value; that can pass as-is.
        obj_dict['select_fields'] = [
            f is not None and f.name or None
            for f in obj_dict['select_fields']
        ]
        return obj_dict

    def __setstate__(self, obj_dict):
        """
        Unpickling support.
        """
        # Rebuild the list of field instances.
        obj_dict['select_fields'] = [
            name is not None and obj_dict['model']._meta.get_field(name) or None
            for name in obj_dict['select_fields']
        ]

        self.__dict__.update(obj_dict)
        # XXX: Need a better solution for this when multi-db stuff is
        # supported. It's the only class-reference to the module-level
        # connection variable.
        self.connection = connection

    def get_meta(self):
        """
        Returns the Options instance (the model._meta) from which to start
        processing. Normally, this is self.model._meta, but it can be changed
        by subclasses.
        """
        return self.model._meta

    def quote_name_unless_alias(self, name):
        """
        A wrapper around connection.ops.quote_name that doesn't quote aliases
        for table names. This avoids problems with some SQL dialects that treat
        quoted strings specially (e.g. PostgreSQL).
        """
        if name in self.quote_cache:
            return self.quote_cache[name]
        if ((name in self.alias_map and name not in self.table_map) or
                name in self.extra_select):
            self.quote_cache[name] = name
            return name
        r = self.connection.ops.quote_name(name)
        self.quote_cache[name] = r
        return r
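
    # A minimal behaviour sketch (assuming a table "app_book" and a generated
    # join alias "T2"): real table names get backend quoting, while aliases
    # that only exist inside this query pass through untouched:
    #
    #   query.quote_name_unless_alias('app_book')  # -> '"app_book"'
    #   query.quote_name_unless_alias('T2')        # -> 'T2' (alias, unquoted)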
|
    def clone(self, klass=None, **kwargs):
        """
        Creates a copy of the current instance. The 'kwargs' parameter can be
        used by clients to update attributes after copying has taken place.
        """
        obj = Empty()
        obj.__class__ = klass or self.__class__
        obj.model = self.model
        obj.connection = self.connection
        obj.alias_refcount = self.alias_refcount.copy()
        obj.alias_map = self.alias_map.copy()
        obj.table_map = self.table_map.copy()
        obj.join_map = self.join_map.copy()
        obj.rev_join_map = self.rev_join_map.copy()
        obj.quote_cache = {}
        obj.default_cols = self.default_cols
        obj.default_ordering = self.default_ordering
        obj.standard_ordering = self.standard_ordering
        obj.included_inherited_models = self.included_inherited_models.copy()
        obj.ordering_aliases = []
        obj.select_fields = self.select_fields[:]
        obj.related_select_fields = self.related_select_fields[:]
        obj.dupe_avoidance = self.dupe_avoidance.copy()
        obj.select = self.select[:]
        obj.tables = self.tables[:]
        obj.where = deepcopy(self.where)
        obj.where_class = self.where_class
        if self.group_by is None:
            obj.group_by = None
        else:
            obj.group_by = self.group_by[:]
        obj.having = deepcopy(self.having)
        obj.order_by = self.order_by[:]
        obj.low_mark, obj.high_mark = self.low_mark, self.high_mark
        obj.distinct = self.distinct
        obj.select_related = self.select_related
        obj.related_select_cols = []
        obj.aggregates = deepcopy(self.aggregates)
        if self.aggregate_select_mask is None:
            obj.aggregate_select_mask = None
        else:
            obj.aggregate_select_mask = self.aggregate_select_mask.copy()
        if self._aggregate_select_cache is None:
            obj._aggregate_select_cache = None
        else:
            obj._aggregate_select_cache = self._aggregate_select_cache.copy()
        obj.max_depth = self.max_depth
        obj.extra = self.extra.copy()
        if self.extra_select_mask is None:
            obj.extra_select_mask = None
        else:
            obj.extra_select_mask = self.extra_select_mask.copy()
        if self._extra_select_cache is None:
            obj._extra_select_cache = None
        else:
            obj._extra_select_cache = self._extra_select_cache.copy()
        obj.extra_tables = self.extra_tables
        obj.extra_where = self.extra_where
        obj.extra_params = self.extra_params
        obj.extra_order_by = self.extra_order_by
        obj.deferred_loading = deepcopy(self.deferred_loading)
        if self.filter_is_sticky and self.used_aliases:
            obj.used_aliases = self.used_aliases.copy()
        else:
            obj.used_aliases = set()
        obj.filter_is_sticky = False
        obj.__dict__.update(kwargs)
        if hasattr(obj, '_setup_query'):
            obj._setup_query()
        return obj
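
    # Usage sketch (hypothetical attribute values): clone() is how QuerySet
    # methods produce modified copies without mutating the original, and
    # 'kwargs' lets callers override attributes on the copy in one step:
    #
    #   obj = query.clone(distinct=True, low_mark=10)
    #   assert query.distinct is False  # the original is unchanged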
|
    def convert_values(self, value, field):
        """Convert the database-returned value into a type that is consistent
        across database backends.

        By default, this defers to the underlying backend operations, but
        it can be overridden by Query classes for specific backends.
        """
        return self.connection.ops.convert_values(value, field)

    def resolve_aggregate(self, value, aggregate):
        """Resolve the value of aggregates returned by the database to
        consistent (and reasonable) types.

        This is required because of the predisposition of certain backends
        to return Decimal and long types when they are not needed.
        """
        if value is None:
            if aggregate.is_ordinal:
                return 0
            # Return None as-is.
            return value
        elif aggregate.is_ordinal:
            # Any ordinal aggregate (e.g., count) returns an int.
            return int(value)
        elif aggregate.is_computed:
            # Any computed aggregate (e.g., avg) returns a float.
            return float(value)
        else:
            # Return value depends on the type of the field being processed.
            return self.convert_values(value, aggregate.field)
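
    # Behaviour sketch with assumed aggregate objects: a count (is_ordinal)
    # always comes back as an int, an average (is_computed) as a float, and
    # a NULL count as 0:
    #
    #   query.resolve_aggregate(None, count_aggregate)          # -> 0
    #   query.resolve_aggregate(Decimal('4'), count_aggregate)  # -> 4
    #   query.resolve_aggregate(Decimal('2.5'), avg_aggregate)  # -> 2.5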
|
    def results_iter(self):
        """
        Returns an iterator over the results from executing this query.
        """
        resolve_columns = hasattr(self, 'resolve_columns')
        fields = None
        for rows in self.execute_sql(MULTI):
            for row in rows:
                if resolve_columns:
                    if fields is None:
                        # We only set this up here because
                        # related_select_fields isn't populated until
                        # execute_sql() has been called.
                        if self.select_fields:
                            fields = self.select_fields + self.related_select_fields
                        else:
                            fields = self.model._meta.fields
                    row = self.resolve_columns(row, fields)

                if self.aggregate_select:
                    aggregate_start = len(self.extra_select.keys()) + len(self.select)
                    aggregate_end = aggregate_start + len(self.aggregate_select)
                    row = tuple(row[:aggregate_start]) + tuple([
                        self.resolve_aggregate(value, aggregate)
                        for (alias, aggregate), value
                        in zip(self.aggregate_select.items(), row[aggregate_start:aggregate_end])
                    ]) + tuple(row[aggregate_end:])

                yield row
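
    # Index arithmetic sketch: a result row is laid out as
    #   (extra_select columns | select columns | aggregates | related columns)
    # so with, say, 1 extra column, 2 select columns and 2 aggregates,
    # aggregate_start is 1 + 2 = 3 and aggregate_end is 3 + 2 = 5, and only
    # row[3:5] is passed through resolve_aggregate().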
|
    def get_aggregation(self):
        """
        Returns the dictionary with the values of the existing aggregations.
        """
        if not self.aggregate_select:
            return {}

        # If there is a group by clause, aggregating does not add useful
        # information but retrieves only the first row. Aggregate
        # over the subquery instead.
        if self.group_by is not None:
            from subqueries import AggregateQuery
            query = AggregateQuery(self.model, self.connection)

            obj = self.clone()

            # Remove any aggregates marked for reduction from the subquery
            # and move them to the outer AggregateQuery.
            for alias, aggregate in self.aggregate_select.items():
                if aggregate.is_summary:
                    query.aggregate_select[alias] = aggregate
                    del obj.aggregate_select[alias]

            query.add_subquery(obj)
        else:
            query = self
            self.select = []
            self.default_cols = False
            self.extra = {}
            self.remove_inherited_models()

        query.clear_ordering(True)
        query.clear_limits()
        query.select_related = False
        query.related_select_cols = []
        query.related_select_fields = []

        result = query.execute_sql(SINGLE)
        if result is None:
            result = [None for q in query.aggregate_select.items()]

        return dict([
            (alias, self.resolve_aggregate(val, aggregate))
            for (alias, aggregate), val
            in zip(query.aggregate_select.items(), result)
        ])
|
    def get_count(self):
        """
        Performs a COUNT() query using the current filter constraints.
        """
        obj = self.clone()
        if len(self.select) > 1 or self.aggregate_select:
            # If a select clause exists, then the query has already started to
            # specify the columns that are to be returned.
            # In this case, we need to use a subquery to evaluate the count.
            from subqueries import AggregateQuery
            subquery = obj
            subquery.clear_ordering(True)
            subquery.clear_limits()

            obj = AggregateQuery(obj.model, obj.connection)
            obj.add_subquery(subquery)

        obj.add_count_column()
        number = obj.get_aggregation()[None]

        # Apply offset and limit constraints manually, since using LIMIT/OFFSET
        # in SQL (in variants that provide them) doesn't change the COUNT
        # output.
        number = max(0, number - self.low_mark)
        if self.high_mark is not None:
            number = min(number, self.high_mark - self.low_mark)

        return number
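
    # Arithmetic sketch for the manual offset/limit handling: if the raw
    # COUNT(*) is 10 and the query was sliced as [3:7] (low_mark=3,
    # high_mark=7), then max(0, 10 - 3) = 7 and min(7, 7 - 3) = 4, which
    # matches the four rows the slice would actually return.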
|
    def as_sql(self, with_limits=True, with_col_aliases=False):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.

        If 'with_limits' is False, any limit/offset information is not included
        in the query.
        """
        self.pre_sql_setup()
        out_cols = self.get_columns(with_col_aliases)
        ordering, ordering_group_by = self.get_ordering()

        # This must come after 'select' and 'ordering' -- see docstring of
        # get_from_clause() for details.
        from_, f_params = self.get_from_clause()

        qn = self.quote_name_unless_alias
        where, w_params = self.where.as_sql(qn=qn)
        having, h_params = self.having.as_sql(qn=qn)
        params = []
        for val in self.extra_select.itervalues():
            params.extend(val[1])

        result = ['SELECT']
        if self.distinct:
            result.append('DISTINCT')
        result.append(', '.join(out_cols + self.ordering_aliases))

        result.append('FROM')
        result.extend(from_)
        params.extend(f_params)

        if where:
            result.append('WHERE %s' % where)
            params.extend(w_params)
        if self.extra_where:
            if not where:
                result.append('WHERE')
            else:
                result.append('AND')
            result.append(' AND '.join(self.extra_where))

        grouping, gb_params = self.get_grouping()
        if grouping:
            if ordering:
                # If the backend can't group by PK (i.e., any database
                # other than MySQL), then any fields mentioned in the
                # ordering clause need to be in the group by clause.
                if not self.connection.features.allows_group_by_pk:
                    for col, col_params in ordering_group_by:
                        if col not in grouping:
                            grouping.append(str(col))
                            gb_params.extend(col_params)
            else:
                ordering = self.connection.ops.force_no_ordering()
            result.append('GROUP BY %s' % ', '.join(grouping))
            params.extend(gb_params)

        if having:
            result.append('HAVING %s' % having)
            params.extend(h_params)

        if ordering:
            result.append('ORDER BY %s' % ', '.join(ordering))

        if with_limits:
            if self.high_mark is not None:
                result.append('LIMIT %d' % (self.high_mark - self.low_mark))
            if self.low_mark:
                if self.high_mark is None:
                    val = self.connection.ops.no_limit_value()
                    if val:
                        result.append('LIMIT %d' % val)
                result.append('OFFSET %d' % self.low_mark)

        params.extend(self.extra_params)
        return ' '.join(result), tuple(params)
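
    # Output sketch (assuming a hypothetical Author model and one filter):
    #
    #   query.as_sql()
    #   # -> ('SELECT "app_author"."id", "app_author"."name" '
    #   #     'FROM "app_author" WHERE "app_author"."name" = %s', ('fred',))
    #
    # The string and the parameters are kept separate so the database
    # interface can do the quoting safely at execution time.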
|
    def as_nested_sql(self):
        """
        Perform the same functionality as the as_sql() method, returning an
        SQL string and parameters. However, the alias prefixes are bumped
        beforehand (in a copy -- the current query isn't changed) and any
        ordering is removed.

        Used when nesting this query inside another.
        """
        obj = self.clone()
        obj.clear_ordering(True)
        obj.bump_prefix()
        return obj.as_sql()

    def combine(self, rhs, connector):
        """
        Merge the 'rhs' query into the current one, with any 'rhs' effects
        being applied *after* (that is, "to the right of") anything in the
        current query. 'rhs' is not modified during a call to this function.

        The 'connector' parameter describes how to connect filters from the
        'rhs' query.
        """
        assert self.model == rhs.model, \
                "Cannot combine queries on two different base models."
        assert self.can_filter(), \
                "Cannot combine queries once a slice has been taken."
        assert self.distinct == rhs.distinct, \
                "Cannot combine a unique query with a non-unique query."

        self.remove_inherited_models()
        # Work out how to relabel the rhs aliases, if necessary.
        change_map = {}
        used = set()
        conjunction = (connector == AND)
        first = True
        for alias in rhs.tables:
            if not rhs.alias_refcount[alias]:
                # An unused alias.
                continue
            promote = (rhs.alias_map[alias][JOIN_TYPE] == self.LOUTER)
            new_alias = self.join(rhs.rev_join_map[alias],
                    (conjunction and not first), used, promote, not conjunction)
            used.add(new_alias)
            change_map[alias] = new_alias
            first = False

        # So that we don't exclude valid results in an "or" query combination,
        # the first join that is exclusive to the lhs (self) must be converted
        # to an outer join.
        if not conjunction:
            for alias in self.tables[1:]:
                if self.alias_refcount[alias] == 1:
                    self.promote_alias(alias, True)
                    break

        # Now relabel a copy of the rhs where-clause and add it to the current
        # one.
        if rhs.where:
            w = deepcopy(rhs.where)
            w.relabel_aliases(change_map)
            if not self.where:
                # Since 'self' matches everything, add an explicit "include
                # everything" where-constraint so that connections between the
                # where clauses won't exclude valid results.
                self.where.add(EverythingNode(), AND)
        elif self.where:
            # rhs has an empty where clause.
            w = self.where_class()
            w.add(EverythingNode(), AND)
        else:
            w = self.where_class()
        self.where.add(w, connector)

        # Selection columns and extra extensions are those provided by 'rhs'.
        self.select = []
        for col in rhs.select:
            if isinstance(col, (list, tuple)):
                self.select.append((change_map.get(col[0], col[0]), col[1]))
            else:
                item = deepcopy(col)
                item.relabel_aliases(change_map)
                self.select.append(item)
        self.select_fields = rhs.select_fields[:]

        if connector == OR:
            # It would be nice to be able to handle this, but the queries don't
            # really make sense (or return consistent value sets). Not worth
            # the extra complexity when you can write a real query instead.
            if self.extra and rhs.extra:
                raise ValueError("When merging querysets using 'or', you "
                        "cannot have extra(select=...) on both sides.")
            if self.extra_where and rhs.extra_where:
                raise ValueError("When merging querysets using 'or', you "
                        "cannot have extra(where=...) on both sides.")
        self.extra.update(rhs.extra)
        extra_select_mask = set()
        if self.extra_select_mask is not None:
            extra_select_mask.update(self.extra_select_mask)
        if rhs.extra_select_mask is not None:
            extra_select_mask.update(rhs.extra_select_mask)
        if extra_select_mask:
            self.set_extra_mask(extra_select_mask)
        self.extra_tables += rhs.extra_tables
        self.extra_where += rhs.extra_where
        self.extra_params += rhs.extra_params

        # Ordering uses the 'rhs' ordering, unless it has none, in which case
        # the current ordering is used.
        self.order_by = rhs.order_by and rhs.order_by[:] or self.order_by
        self.extra_order_by = rhs.extra_order_by or self.extra_order_by
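
    # Combination sketch: this is what backs QuerySet "&" and "|". For
    # hypothetical queries qs1 and qs2 over the same model,
    #
    #   combined = deepcopy(qs1.query)
    #   combined.combine(qs2.query, OR)
    #
    # relabels qs2's table aliases to avoid clashes, ORs the two where
    # clauses together, and promotes joins to LEFT OUTER where needed so
    # the OR doesn't silently drop valid rows.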
|
    def pre_sql_setup(self):
        """
        Does any necessary class setup immediately prior to producing SQL. This
        is for things that can't necessarily be done in __init__ because we
        might not have all the pieces in place at that time.
        """
        if not self.tables:
            self.join((None, self.model._meta.db_table, None, None))
        if (not self.select and self.default_cols and not
                self.included_inherited_models):
            self.setup_inherited_models()
        if self.select_related and not self.related_select_cols:
            self.fill_related_selections()

    def deferred_to_data(self, target, callback):
        """
        Converts the self.deferred_loading data structure to an alternate data
        structure, describing the fields that *will* be loaded. This is used to
        compute the columns to select from the database and also by the
        QuerySet class to work out which fields are being initialised on each
        model. Models that have all their fields included aren't mentioned in
        the result, only those that have field restrictions in place.

        The "target" parameter is the instance that is populated (in place).
        The "callback" is a function that is called whenever a (model, field)
        pair needs to be added to "target". It accepts three parameters:
        "target", and the model and list of fields being added for that model.
        """
        field_names, defer = self.deferred_loading
        if not field_names:
            return
        columns = set()
        orig_opts = self.model._meta
        seen = {}
        must_include = {self.model: set([orig_opts.pk])}
        for field_name in field_names:
            parts = field_name.split(LOOKUP_SEP)
            cur_model = self.model
            opts = orig_opts
            for name in parts[:-1]:
                old_model = cur_model
                source = opts.get_field_by_name(name)[0]
                cur_model = opts.get_field_by_name(name)[0].rel.to
                opts = cur_model._meta
                # Even if we're "just passing through" this model, we must add
                # both the current model's pk and the related reference field
                # to the things we select.
                must_include[old_model].add(source)
                add_to_dict(must_include, cur_model, opts.pk)
            field, model, _, _ = opts.get_field_by_name(parts[-1])
            if model is None:
                model = cur_model
            add_to_dict(seen, model, field)

        if defer:
            # We need to load all fields for each model, except those that
            # appear in "seen" (for all models that appear in "seen"). The only
            # slight complexity here is handling fields that exist on parent
            # models.
            workset = {}
            for model, values in seen.iteritems():
                for field in model._meta.local_fields:
                    if field in values:
                        continue
                    add_to_dict(workset, model, field)
            for model, values in must_include.iteritems():
                # If we haven't included a model in workset, we don't add the
                # corresponding must_include fields for that model, since an
                # empty set means "include all fields". That's why there's no
                # "else" branch here.
                if model in workset:
                    workset[model].update(values)
            for model, values in workset.iteritems():
                callback(target, model, values)
        else:
            for model, values in must_include.iteritems():
                if model in seen:
                    seen[model].update(values)
                else:
                    # As we've passed through this model, but not explicitly
                    # included any fields, we have to make sure it's mentioned
                    # so that only the "must include" fields are pulled in.
                    seen[model] = values
            # Now ensure that every model in the inheritance chain is mentioned
            # in the parent list. Again, it must be mentioned to ensure that
            # only "must include" fields are pulled in.
            for model in orig_opts.get_parent_list():
                if model not in seen:
                    seen[model] = set()
            for model, values in seen.iteritems():
                callback(target, model, values)
|
    def deferred_to_columns(self):
        """
        Converts the self.deferred_loading data structure to a mapping of table
        names to sets of column names which are to be loaded. Returns the
        dictionary.
        """
        columns = {}
        self.deferred_to_data(columns, self.deferred_to_columns_cb)
        return columns

    def deferred_to_columns_cb(self, target, model, fields):
        """
        Callback used by deferred_to_columns(). The "target" parameter should
        be a dictionary mapping table names to sets of column names.
        """
        table = model._meta.db_table
        if table not in target:
            target[table] = set()
        for field in fields:
            target[table].add(field.column)
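
    # Shape sketch: for a hypothetical deferred_loading of
    # (set(['name']), True) -- i.e. "defer the name field" -- on a model
    # whose table is "app_author" with columns id, name and data,
    # deferred_to_columns() would return something like:
    #
    #   {'app_author': set(['id', 'data'])}
    #
    # mapping each table to the columns that *will* be loaded.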
|
    def get_columns(self, with_aliases=False):
        """
        Returns the list of columns to use in the select statement. If no
        columns have been specified, returns all columns relating to fields in
        the model.

        If 'with_aliases' is true, any column names that are duplicated
        (without the table names) are given unique aliases. This is needed in
        some cases to avoid ambiguity with nested queries.
        """
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in self.extra_select.iteritems()]
        aliases = set(self.extra_select.keys())
        if with_aliases:
            col_aliases = aliases.copy()
        else:
            col_aliases = set()
        if self.select:
            only_load = self.deferred_to_columns()
            for col in self.select:
                if isinstance(col, (list, tuple)):
                    alias, column = col
                    table = self.alias_map[alias][TABLE_NAME]
                    if table in only_load and column not in only_load[table]:
                        continue
                    r = '%s.%s' % (qn(alias), qn(column))
                    if with_aliases:
                        if col[1] in col_aliases:
                            c_alias = 'Col%d' % len(col_aliases)
                            result.append('%s AS %s' % (r, c_alias))
                            aliases.add(c_alias)
                            col_aliases.add(c_alias)
                        else:
                            result.append('%s AS %s' % (r, qn2(col[1])))
                            aliases.add(r)
                            col_aliases.add(col[1])
                    else:
                        result.append(r)
                        aliases.add(r)
                        col_aliases.add(col[1])
                else:
                    result.append(col.as_sql(quote_func=qn))

                    if hasattr(col, 'alias'):
                        aliases.add(col.alias)
                        col_aliases.add(col.alias)

        elif self.default_cols:
            cols, new_aliases = self.get_default_columns(with_aliases,
                    col_aliases)
            result.extend(cols)
            aliases.update(new_aliases)

        result.extend([
            '%s%s' % (
                aggregate.as_sql(quote_func=qn),
                alias is not None and ' AS %s' % qn(alias) or ''
            )
            for alias, aggregate in self.aggregate_select.items()
        ])

        for table, col in self.related_select_cols:
            r = '%s.%s' % (qn(table), qn(col))
            if with_aliases and col in col_aliases:
                c_alias = 'Col%d' % len(col_aliases)
                result.append('%s AS %s' % (r, c_alias))
                aliases.add(c_alias)
                col_aliases.add(c_alias)
            else:
                result.append(r)
                aliases.add(r)
                col_aliases.add(col)

        self._select_aliases = aliases
        return result
|
    def get_default_columns(self, with_aliases=False, col_aliases=None,
            start_alias=None, opts=None, as_pairs=False):
        """
        Computes the default columns for selecting every field in the base
        model. Will sometimes be called to pull in related models (e.g. via
        select_related), in which case "opts" and "start_alias" will be given
        to provide a starting point for the traversal.

        Returns a list of strings, quoted appropriately for use in SQL
        directly, as well as a set of aliases used in the select statement (if
        'as_pairs' is True, the first component is a list of (alias, col_name)
        pairs instead of strings).
        """
        result = []
        if opts is None:
            opts = self.model._meta
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        aliases = set()
        only_load = self.deferred_to_columns()
        # Skip all proxies to the root proxied model.
        proxied_model = get_proxied_model(opts)

        if start_alias:
            seen = {None: start_alias}
        for field, model in opts.get_fields_with_model():
            if start_alias:
                try:
                    alias = seen[model]
                except KeyError:
                    if model is proxied_model:
                        alias = start_alias
                    else:
                        link_field = opts.get_ancestor_link(model)
                        alias = self.join((start_alias, model._meta.db_table,
                                link_field.column, model._meta.pk.column))
                    seen[model] = alias
            else:
                # If we're starting from the base model of the queryset, the
                # aliases will have already been set up in pre_sql_setup(), so
                # we can save time here.
                alias = self.included_inherited_models[model]
            table = self.alias_map[alias][TABLE_NAME]
            if table in only_load and field.column not in only_load[table]:
                continue
            if as_pairs:
                result.append((alias, field.column))
                aliases.add(alias)
                continue
            if with_aliases and field.column in col_aliases:
                c_alias = 'Col%d' % len(col_aliases)
                result.append('%s.%s AS %s' % (qn(alias),
                    qn2(field.column), c_alias))
                col_aliases.add(c_alias)
                aliases.add(c_alias)
            else:
                r = '%s.%s' % (qn(alias), qn2(field.column))
                result.append(r)
                aliases.add(r)
                if with_aliases:
                    col_aliases.add(field.column)
        return result, aliases
|
    def get_from_clause(self):
        """
        Returns a list of strings that are joined together to go after the
        "FROM" part of the query, as well as a list of any extra parameters
        that need to be included. Subclasses can override this to create a
        from-clause via a "select".

        This should only be called after any SQL construction methods that
        might change the tables we need. This means the select columns and
        ordering must be done first.
        """
        result = []
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        first = True
        for alias in self.tables:
            if not self.alias_refcount[alias]:
                continue
            try:
                name, alias, join_type, lhs, lhs_col, col, nullable = self.alias_map[alias]
            except KeyError:
                # Extra tables can end up in self.tables, but not in the
                # alias_map if they aren't in a join. That's OK. We skip them.
                continue
            alias_str = (alias != name and ' %s' % alias or '')
            if join_type and not first:
                result.append('%s %s%s ON (%s.%s = %s.%s)'
                        % (join_type, qn(name), alias_str, qn(lhs),
                           qn2(lhs_col), qn(alias), qn2(col)))
            else:
                connector = not first and ', ' or ''
                result.append('%s%s%s' % (connector, qn(name), alias_str))
            first = False
        for t in self.extra_tables:
            alias, unused = self.table_alias(t)
            # Only add the alias if it's not already present (the table_alias()
            # call increments the refcount, so an alias refcount of one means
            # this is the only reference).
            if alias not in self.alias_map or self.alias_refcount[alias] == 1:
                connector = not first and ', ' or ''
                result.append('%s%s' % (connector, qn(alias)))
                first = False
        return result, []
|
    def get_grouping(self):
        """
        Returns a tuple representing the SQL elements in the "group by" clause.
        """
        qn = self.quote_name_unless_alias
        result, params = [], []
        if self.group_by is not None:
            group_by = self.group_by or []

            extra_selects = []
            for extra_select, extra_params in self.extra_select.itervalues():
                extra_selects.append(extra_select)
                params.extend(extra_params)
            for col in group_by + self.related_select_cols + extra_selects:
                if isinstance(col, (list, tuple)):
                    result.append('%s.%s' % (qn(col[0]), qn(col[1])))
                elif hasattr(col, 'as_sql'):
                    result.append(col.as_sql(qn))
                else:
                    result.append(str(col))
        return result, params
|
    def get_ordering(self):
        """
        Returns a tuple containing a list representing the SQL elements in the
        "order by" clause, and the list of SQL elements that need to be added
        to the GROUP BY clause as a result of the ordering.

        Also sets the ordering_aliases attribute on this instance to a list of
        extra aliases needed in the select.

        Determining the ordering SQL can change the tables we need to include,
        so this should be run *before* get_from_clause().
        """
        if self.extra_order_by:
            ordering = self.extra_order_by
        elif not self.default_ordering:
            ordering = self.order_by
        else:
            ordering = self.order_by or self.model._meta.ordering
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        distinct = self.distinct
        select_aliases = self._select_aliases
        result = []
        group_by = []
        ordering_aliases = []
        if self.standard_ordering:
            asc, desc = ORDER_DIR['ASC']
        else:
            asc, desc = ORDER_DIR['DESC']

        # It's possible, due to model inheritance, that normal usage might try
        # to include the same field more than once in the ordering. We track
        # the table/column pairs we use and discard any after the first use.
        processed_pairs = set()

        for field in ordering:
            if field == '?':
                result.append(self.connection.ops.random_function_sql())
                continue
            if isinstance(field, int):
                if field < 0:
                    order = desc
                    field = -field
                else:
                    order = asc
                result.append('%s %s' % (field, order))
                group_by.append((field, []))
                continue
            col, order = get_order_dir(field, asc)
            if col in self.aggregate_select:
                result.append('%s %s' % (col, order))
                continue
            if '.' in field:
                # This came in through an extra(order_by=...) addition. Pass it
                # on verbatim.
                table, col = col.split('.', 1)
                if (table, col) not in processed_pairs:
                    elt = '%s.%s' % (qn(table), col)
                    processed_pairs.add((table, col))
                    if not distinct or elt in select_aliases:
                        result.append('%s %s' % (elt, order))
                        group_by.append((elt, []))
            elif get_order_dir(field)[0] not in self.extra_select:
                # 'col' is of the form 'field' or 'field1__field2' or
                # '-field1__field2__field', etc.
                for table, col, order in self.find_ordering_name(field,
                        self.model._meta, default_order=asc):
                    if (table, col) not in processed_pairs:
                        elt = '%s.%s' % (qn(table), qn2(col))
                        processed_pairs.add((table, col))
                        if distinct and elt not in select_aliases:
                            ordering_aliases.append(elt)
                        result.append('%s %s' % (elt, order))
                        group_by.append((elt, []))
            else:
                elt = qn2(col)
                if distinct and col not in select_aliases:
                    ordering_aliases.append(elt)
                result.append('%s %s' % (elt, order))
                group_by.append(self.extra_select[col])
        self.ordering_aliases = ordering_aliases
        return result, group_by
|
    def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
            already_seen=None):
        """
        Returns the table alias (the name might be ambiguous, the alias will
        not be) and column name for ordering by the given 'name' parameter.
        The 'name' is of the form 'field1__field2__...__fieldN'.
        """
        name, order = get_order_dir(name, default_order)
        pieces = name.split(LOOKUP_SEP)
        if not alias:
            alias = self.get_initial_alias()
        field, target, opts, joins, last, extra = self.setup_joins(pieces,
                opts, alias, False)
        alias = joins[-1]
        col = target.column
        if not field.rel:
            # To avoid inadvertent trimming of a necessary alias, use the
            # refcount to show that we are referencing a non-relation field on
            # the model.
            self.ref_alias(alias)

        # Must use left outer joins for nullable fields and their relations.
        self.promote_alias_chain(joins,
            self.alias_map[joins[0]][JOIN_TYPE] == self.LOUTER)

        # If we get to this point and the field is a relation to another model,
        # append the default ordering for that model.
        if field.rel and len(joins) > 1 and opts.ordering:
            # Firstly, avoid infinite loops.
            if not already_seen:
                already_seen = set()
            join_tuple = tuple([self.alias_map[j][TABLE_NAME] for j in joins])
            if join_tuple in already_seen:
                raise FieldError('Infinite loop caused by ordering.')
            already_seen.add(join_tuple)

            results = []
            for item in opts.ordering:
                results.extend(self.find_ordering_name(item, opts, alias,
                        order, already_seen))
            return results

        if alias:
            # We have to do the same "final join" optimisation as in
            # add_filter, since the final column might not otherwise be part of
            # the select set (so we can't order on it).
            while 1:
                join = self.alias_map[alias]
                if col != join[RHS_JOIN_COL]:
                    break
                self.unref_alias(alias)
                alias = join[LHS_ALIAS]
                col = join[LHS_JOIN_COL]
        return [(alias, col, order)]
|
    def table_alias(self, table_name, create=False):
        """
        Returns a table alias for the given table_name and whether this is a
        new alias or not.

        If 'create' is true, a new alias is always created. Otherwise, the
        most recently created alias for the table (if one exists) is reused.
        """
        current = self.table_map.get(table_name)
        if not create and current:
            alias = current[0]
            self.alias_refcount[alias] += 1
            return alias, False

        # Create a new alias for this table.
        if current:
            alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
            current.append(alias)
        else:
            # The first occurrence of a table uses the table name directly.
            alias = table_name
            self.table_map[alias] = [alias]
        self.alias_refcount[alias] = 1
        self.tables.append(alias)
        return alias, True
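
    # Alias-generation sketch (hypothetical table "app_book"): the first use
    # of a table gets the table name itself as its alias; a forced second
    # alias gets a generated name built from alias_prefix and
    # len(alias_map) + 1:
    #
    #   query.table_alias('app_book')               # -> ('app_book', True)
    #   query.table_alias('app_book', create=True)  # -> e.g. ('T2', True)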
|
    def ref_alias(self, alias):
        """ Increases the reference count for this alias. """
        self.alias_refcount[alias] += 1

    def unref_alias(self, alias):
        """ Decreases the reference count for this alias. """
        self.alias_refcount[alias] -= 1

    def promote_alias(self, alias, unconditional=False):
        """
        Promotes the join type of an alias to an outer join if it's possible
        for the join to contain NULL values on the left. If 'unconditional' is
        False, the join is only promoted if it is nullable; otherwise it is
        always promoted.

        Returns True if the join was promoted.
        """
        if ((unconditional or self.alias_map[alias][NULLABLE]) and
                self.alias_map[alias][JOIN_TYPE] != self.LOUTER):
            data = list(self.alias_map[alias])
            data[JOIN_TYPE] = self.LOUTER
            self.alias_map[alias] = tuple(data)
            return True
        return False

    def promote_alias_chain(self, chain, must_promote=False):
        """
        Walks along a chain of aliases, promoting the first nullable join and
        any joins following that. If 'must_promote' is True, all the aliases in
        the chain are promoted.
        """
        for alias in chain:
            if self.promote_alias(alias, must_promote):
                must_promote = True

    def promote_unused_aliases(self, initial_refcounts, used_aliases):
        """
        Given a "before" copy of the alias_refcounts dictionary (as
        'initial_refcounts') and a collection of aliases that may have been
        changed or created, works out which aliases have been created since
        then and which ones haven't been used and promotes all of those
        aliases, plus any children of theirs in the alias tree, to outer joins.
        """
        # FIXME: There's some (a lot of!) overlap with the similar OR promotion
        # in add_filter(). It's not quite identical, but is very similar. So
        # pulling out the common bits is something for later.
        considered = {}
        for alias in self.tables:
            if alias not in used_aliases:
                continue
            if (alias not in initial_refcounts or
                    self.alias_refcount[alias] == initial_refcounts[alias]):
                parent = self.alias_map[alias][LHS_ALIAS]
                must_promote = considered.get(parent, False)
                promoted = self.promote_alias(alias, must_promote)
                considered[alias] = must_promote or promoted
|
    def change_aliases(self, change_map):
        """
        Changes the aliases in change_map (which maps old-alias -> new-alias),
        relabelling any references to them in select columns and the where
        clause.
        """
        assert set(change_map.keys()).intersection(set(change_map.values())) == set()

        # 1. Update references in "select" (normal columns plus aliases),
        # "group by", "where" and "having".
        self.where.relabel_aliases(change_map)
        self.having.relabel_aliases(change_map)
        for columns in (self.select, self.aggregates.values(), self.group_by or []):
            for pos, col in enumerate(columns):
                if isinstance(col, (list, tuple)):
                    old_alias = col[0]
                    columns[pos] = (change_map.get(old_alias, old_alias), col[1])
                else:
                    col.relabel_aliases(change_map)

        # 2. Rename the alias in the internal table/alias datastructures.
        for old_alias, new_alias in change_map.iteritems():
            alias_data = list(self.alias_map[old_alias])
            alias_data[RHS_ALIAS] = new_alias

            t = self.rev_join_map[old_alias]
            data = list(self.join_map[t])
            data[data.index(old_alias)] = new_alias
            self.join_map[t] = tuple(data)
            self.rev_join_map[new_alias] = t
            del self.rev_join_map[old_alias]
            self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
            del self.alias_refcount[old_alias]
            self.alias_map[new_alias] = tuple(alias_data)
            del self.alias_map[old_alias]

            table_aliases = self.table_map[alias_data[TABLE_NAME]]
            for pos, alias in enumerate(table_aliases):
                if alias == old_alias:
                    table_aliases[pos] = new_alias
                    break
            for pos, alias in enumerate(self.tables):
                if alias == old_alias:
                    self.tables[pos] = new_alias
                    break
        for key, alias in self.included_inherited_models.items():
            if alias in change_map:
                self.included_inherited_models[key] = change_map[alias]

        # 3. Update any joins that refer to the old alias.
        for alias, data in self.alias_map.iteritems():
            lhs = data[LHS_ALIAS]
            if lhs in change_map:
                data = list(data)
                data[LHS_ALIAS] = change_map[lhs]
                self.alias_map[alias] = tuple(data)
|
    def bump_prefix(self, exceptions=()):
        """
        Changes the alias prefix to the next letter in the alphabet and
        relabels all the aliases. Even tables that previously had no alias will
        get an alias after this call (it's mostly used for nested queries and
        the outer query will already be using the non-aliased table name).

        Subclasses who create their own prefix should override this method to
        produce a similar result (a new prefix and relabelled aliases).

        The 'exceptions' parameter is a container that holds alias names which
        should not be changed.
        """
        current = ord(self.alias_prefix)
        assert current < ord('Z')
        prefix = chr(current + 1)
        self.alias_prefix = prefix
        change_map = {}
        for pos, alias in enumerate(self.tables):
            if alias in exceptions:
                continue
            new_alias = '%s%d' % (prefix, pos)
            change_map[alias] = new_alias
            self.tables[pos] = new_alias
        self.change_aliases(change_map)
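
    # Relabelling sketch: with the default prefix 'T' and tables
    # ['app_author', 'T2'], bump_prefix() moves the prefix on to 'U' and
    # renames every alias positionally, so the tables become ['U0', 'U1'].
    # This keeps a nested subquery's aliases from colliding with the outer
    # query's aliases.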
|
    def get_initial_alias(self):
        """
        Returns the first alias for this query, after increasing its reference
        count.
        """
        if self.tables:
            alias = self.tables[0]
            self.ref_alias(alias)
        else:
            alias = self.join((None, self.model._meta.db_table, None, None))
        return alias

    def count_active_tables(self):
        """
        Returns the number of tables in this query with a non-zero reference
        count.
        """
        return len([1 for count in self.alias_refcount.itervalues() if count])
|
    def join(self, connection, always_create=False, exclusions=(),
            promote=False, outer_if_first=False, nullable=False, reuse=None):
        """
        Returns an alias for the join in 'connection', either reusing an
        existing alias for that join or creating a new one. 'connection' is a
        tuple (lhs, table, lhs_col, col) where 'lhs' is either an existing
        table alias or a table name. The join corresponds to the SQL equivalent
        of::

            lhs.lhs_col = table.col

        If 'always_create' is True and 'reuse' is None, a new alias is always
        created, regardless of whether one already exists or not. If
        'always_create' is True and 'reuse' is a set, an alias in 'reuse' that
        matches the connection will be returned, if possible. If
        'always_create' is False, the first existing alias that matches the
        'connection' is returned, if any. Otherwise a new join is created.

        If 'exclusions' is specified, it is something satisfying the container
        protocol ("foo in exclusions" must work) and specifies a list of
        aliases that should not be returned, even if they satisfy the join.

        If 'promote' is True, the join type for the alias will be LOUTER (if
        the alias previously existed, the join type will be promoted from INNER
        to LOUTER, if necessary).

        If 'outer_if_first' is True and a new join is created, it will have the
        LOUTER join type. This is used when joining certain types of querysets
        and Q-objects together.

        If 'nullable' is True, the join can potentially involve NULL values and
        is a candidate for promotion (to "left outer") when combining querysets.
        """
        lhs, table, lhs_col, col = connection
        if lhs in self.alias_map:
            lhs_table = self.alias_map[lhs][TABLE_NAME]
        else:
            lhs_table = lhs

        if reuse and always_create and table in self.table_map:
            # Convert the 'reuse' case to be "exclude everything but the
            # reusable set, minus exclusions, for this table".
            exclusions = set(self.table_map[table]).difference(reuse).union(set(exclusions))
            always_create = False
        t_ident = (lhs_table, table, lhs_col, col)
        if not always_create:
            for alias in self.join_map.get(t_ident, ()):
                if alias not in exclusions:
                    if lhs_table and not self.alias_refcount[self.alias_map[alias][LHS_ALIAS]]:
                        # The LHS of this join tuple is no longer part of the
                        # query, so skip this possibility.
                        continue
                    if self.alias_map[alias][LHS_ALIAS] != lhs:
                        continue
                    self.ref_alias(alias)
                    if promote:
                        self.promote_alias(alias)
                    return alias

        # No reuse is possible, so we need a new alias.
        alias, _ = self.table_alias(table, True)
        if not lhs:
            # Not all tables need to be joined to anything. No join type
            # means the later columns are ignored.
            join_type = None
        elif promote or outer_if_first:
            join_type = self.LOUTER
        else:
            join_type = self.INNER
        join = (table, alias, join_type, lhs, lhs_col, col, nullable)
        self.alias_map[alias] = join
        if t_ident in self.join_map:
            self.join_map[t_ident] += (alias,)
        else:
            self.join_map[t_ident] = (alias,)
        self.rev_join_map[alias] = t_ident
        return alias
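
    # Join sketch: for hypothetical tables, the connection tuple
    # ('app_book', 'app_author', 'author_id', 'id') asks for the SQL join
    #
    #   app_book.author_id = app_author.id
    #
    # and the returned alias names the app_author side. Repeating the same
    # call without always_create=True reuses (and re-references) the first
    # alias instead of adding a second copy of the join.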
|
    def setup_inherited_models(self):
        """
        If the model that is the basis for this QuerySet inherits other models,
        we need to ensure that those other models have their tables included in
        the query.

        We do this as a separate step so that subclasses know which
        tables are going to be active in the query, without needing to compute
        all the select columns (this method is called from pre_sql_setup(),
        whereas column determination is a later part, and side-effect, of
        as_sql()).
        """
        opts = self.model._meta
        root_alias = self.tables[0]
        seen = {None: root_alias}

        # Skip all proxies to the root proxied model.
        proxied_model = get_proxied_model(opts)

        for field, model in opts.get_fields_with_model():
            if model not in seen:
                if model is proxied_model:
                    seen[model] = root_alias
                else:
                    link_field = opts.get_ancestor_link(model)
                    seen[model] = self.join((root_alias, model._meta.db_table,
                            link_field.column, model._meta.pk.column))
        self.included_inherited_models = seen

    def remove_inherited_models(self):
        """
        Undoes the effects of setup_inherited_models(). Should be called
        whenever select columns (self.select) are set explicitly.
        """
        for key, alias in self.included_inherited_models.items():
            if key:
                self.unref_alias(alias)
        self.included_inherited_models = {}
|
    def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
            used=None, requested=None, restricted=None, nullable=None,
            dupe_set=None, avoid_set=None):
        """
        Fill in the information needed for a select_related query. The current
        depth is measured as the number of connections away from the root model
        (for example, cur_depth=1 means we are looking at models with direct
        connections to the root model).
        """
        if not restricted and self.max_depth and cur_depth > self.max_depth:
            # We've recursed far enough; bail out.
            return

        if not opts:
            opts = self.get_meta()
            root_alias = self.get_initial_alias()
            self.related_select_cols = []
            self.related_select_fields = []
        if not used:
            used = set()
        if dupe_set is None:
            dupe_set = set()
        if avoid_set is None:
            avoid_set = set()
        orig_dupe_set = dupe_set

        # Setup for the case when only particular related fields should be
        # included in the related selection.
        if requested is None and restricted is not False:
            if isinstance(self.select_related, dict):
                requested = self.select_related
                restricted = True
            else:
                restricted = False

        for f, model in opts.get_fields_with_model():
            if not select_related_descend(f, restricted, requested):
                continue
            # The "avoid" set is aliases we want to avoid just for this
            # particular branch of the recursion. They aren't permanently
            # forbidden from reuse in the related selection tables (which is
            # what "used" specifies).
            avoid = avoid_set.copy()
            dupe_set = orig_dupe_set.copy()
            table = f.rel.to._meta.db_table
            if nullable or f.null:
                promote = True
            else:
                promote = False
            if model:
                int_opts = opts
                alias = root_alias
                alias_chain = []
                for int_model in opts.get_base_chain(model):
                    # Proxy models have elements in the base chain with no
                    # parents; assign the new options object and skip to the
                    # next base in that case.
                    if not int_opts.parents[int_model]:
                        int_opts = int_model._meta
                        continue
                    lhs_col = int_opts.parents[int_model].column
                    dedupe = lhs_col in opts.duplicate_targets
                    if dedupe:
                        avoid.update(self.dupe_avoidance.get((id(opts), lhs_col),
                            ()))
                        dupe_set.add((opts, lhs_col))
                    int_opts = int_model._meta
                    alias = self.join((alias, int_opts.db_table, lhs_col,
                            int_opts.pk.column), exclusions=used,
                            promote=promote)
                    alias_chain.append(alias)
                    for (dupe_opts, dupe_col) in dupe_set:
                        self.update_dupe_avoidance(dupe_opts, dupe_col, alias)
                if self.alias_map[root_alias][JOIN_TYPE] == self.LOUTER:
                    self.promote_alias_chain(alias_chain, True)
            else:
                alias = root_alias

            dedupe = f.column in opts.duplicate_targets
            if dupe_set or dedupe:
                avoid.update(self.dupe_avoidance.get((id(opts), f.column), ()))
                if dedupe:
                    dupe_set.add((opts, f.column))

            alias = self.join((alias, table, f.column,
                    f.rel.get_related_field().column),
                    exclusions=used.union(avoid), promote=promote)
            used.add(alias)
            columns, aliases = self.get_default_columns(start_alias=alias,
                    opts=f.rel.to._meta, as_pairs=True)
            self.related_select_cols.extend(columns)
            if self.alias_map[alias][JOIN_TYPE] == self.LOUTER:
                self.promote_alias_chain(aliases, True)
            self.related_select_fields.extend(f.rel.to._meta.fields)
            if restricted:
                next = requested.get(f.name, {})
            else:
                next = False
            if f.null is not None:
                new_nullable = f.null
            else:
                new_nullable = None
            for dupe_opts, dupe_col in dupe_set:
                self.update_dupe_avoidance(dupe_opts, dupe_col, alias)
            self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
                    used, next, restricted, new_nullable, dupe_set, avoid)
|
1443 |
|
    def add_aggregate(self, aggregate, model, alias, is_summary):
        """
        Adds a single aggregate expression to the Query.
        """
        opts = model._meta
        field_list = aggregate.lookup.split(LOOKUP_SEP)
        if (len(field_list) == 1 and
                aggregate.lookup in self.aggregates.keys()):
            # Aggregate is over an annotation
            field_name = field_list[0]
            col = field_name
            source = self.aggregates[field_name]
            if not is_summary:
                raise FieldError("Cannot compute %s('%s'): '%s' is an aggregate" % (
                    aggregate.name, field_name, field_name))
        elif ((len(field_list) > 1) or
                (field_list[0] not in [i.name for i in opts.fields]) or
                self.group_by is None or
                not is_summary):
            # If:
            #   - the field descriptor has more than one part (foo__bar), or
            #   - the field descriptor is referencing an m2m/m2o field, or
            #   - this is a reference to a model field (possibly inherited), or
            #   - this is an annotation over a model field
            # then we need to explore the joins that are required.

            field, source, opts, join_list, last, _ = self.setup_joins(
                field_list, opts, self.get_initial_alias(), False)

            # Process the join chain to see if it can be trimmed
            col, _, join_list = self.trim_joins(source, join_list, last, False)

            # If the aggregate references a model or field that requires a join,
            # those joins must be LEFT OUTER - empty join rows must be returned
            # in order for zeros to be returned for those aggregates.
            for column_alias in join_list:
                self.promote_alias(column_alias, unconditional=True)

            col = (join_list[-1], col)
        else:
            # The simplest cases. No joins required -
            # just reference the provided column alias.
            field_name = field_list[0]
            source = opts.get_field(field_name)
            col = field_name

        # Add the aggregate to the query
        alias = truncate_name(alias, self.connection.ops.max_name_length())
        aggregate.add_to_query(self, alias, col=col, source=source, is_summary=is_summary)

    def add_filter(self, filter_expr, connector=AND, negate=False, trim=False,
            can_reuse=None, process_extras=True):
        """
        Add a single filter to the query. The 'filter_expr' is a pair:
        (filter_string, value). E.g. ('name__contains', 'fred')

        If 'negate' is True, this is an exclude() filter. It's important to
        note that this method does not negate anything in the where-clause
        object when inserting the filter constraints. This is because negated
        filters often require multiple calls to add_filter() and the negation
        should only happen once. So the caller is responsible for this (the
        caller will normally be add_q(), for example).

        If 'trim' is True, we automatically trim the final join group (used
        internally when constructing nested queries).

        If 'can_reuse' is a set, we are processing a component of a
        multi-component filter (e.g. filter(Q1, Q2)). In this case, 'can_reuse'
        will be a set of table aliases that can be reused in this filter, even
        if we would otherwise force the creation of new aliases for a join
        (needed for nested Q-filters). The set is updated by this method.

        If 'process_extras' is set, any extra filters returned from the table
        joining process will be processed. This parameter is set to False
        during the processing of extra filters to avoid infinite recursion.
        """
        arg, value = filter_expr
        parts = arg.split(LOOKUP_SEP)
        if not parts:
            raise FieldError("Cannot parse keyword query %r" % arg)

        # Work out the lookup type and remove it from 'parts', if necessary.
        if len(parts) == 1 or parts[-1] not in self.query_terms:
            lookup_type = 'exact'
        else:
            lookup_type = parts.pop()

        # By default, this is a WHERE clause. If an aggregate is referenced
        # in the value, the filter will be promoted to a HAVING clause.
        having_clause = False

        # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
        # uses of None as a query value.
        if value is None:
            if lookup_type != 'exact':
                raise ValueError("Cannot use None as a query value")
            lookup_type = 'isnull'
            value = True
        elif (value == '' and lookup_type == 'exact' and
              connection.features.interprets_empty_strings_as_nulls):
            lookup_type = 'isnull'
            value = True
        elif callable(value):
            value = value()
        elif hasattr(value, 'evaluate'):
            # If value is a query expression, evaluate it
            value = SQLEvaluator(value, self)
            having_clause = value.contains_aggregate

        for alias, aggregate in self.aggregates.items():
            if alias == parts[0]:
                entry = self.where_class()
                entry.add((aggregate, lookup_type, value), AND)
                if negate:
                    entry.negate()
                self.having.add(entry, AND)
                return

        opts = self.get_meta()
        alias = self.get_initial_alias()
        allow_many = trim or not negate

        try:
            field, target, opts, join_list, last, extra_filters = self.setup_joins(
                    parts, opts, alias, True, allow_many, can_reuse=can_reuse,
                    negate=negate, process_extras=process_extras)
        except MultiJoin, e:
            self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
                    can_reuse)
            return

        if (lookup_type == 'isnull' and value is True and not negate and
                len(join_list) > 1):
            # If the comparison is against NULL, we may need to use some left
            # outer joins when creating the join chain. This is only done when
            # needed, as it's less efficient at the database level.
            self.promote_alias_chain(join_list)

        # Process the join list to see if we can remove any inner joins from
        # the far end (fewer tables in a query is better).
        col, alias, join_list = self.trim_joins(target, join_list, last, trim)

        if connector == OR:
            # Some joins may need to be promoted when adding a new filter to a
            # disjunction. We walk the list of new joins and where it diverges
            # from any previous joins (ref count is 1 in the table list), we
            # make the new additions (and any existing ones not used in the new
            # join list) an outer join.
            join_it = iter(join_list)
            table_it = iter(self.tables)
            join_it.next(), table_it.next()
            table_promote = False
            join_promote = False
            for join in join_it:
                table = table_it.next()
                if join == table and self.alias_refcount[join] > 1:
                    continue
                join_promote = self.promote_alias(join)
                if table != join:
                    table_promote = self.promote_alias(table)
                break
            self.promote_alias_chain(join_it, join_promote)
            self.promote_alias_chain(table_it, table_promote)

        if having_clause:
            self.having.add((Constraint(alias, col, field), lookup_type, value),
                    connector)
        else:
            self.where.add((Constraint(alias, col, field), lookup_type, value),
                    connector)

        if negate:
            self.promote_alias_chain(join_list)
            if lookup_type != 'isnull':
                if len(join_list) > 1:
                    for alias in join_list:
                        if self.alias_map[alias][JOIN_TYPE] == self.LOUTER:
                            j_col = self.alias_map[alias][RHS_JOIN_COL]
                            entry = self.where_class()
                            entry.add((Constraint(alias, j_col, None),
                                    'isnull', True), AND)
                            entry.negate()
                            self.where.add(entry, AND)
                            break
                elif not (lookup_type == 'in'
                            and not hasattr(value, 'as_sql')
                            and not hasattr(value, '_as_sql')
                            and not value) and field.null:
                    # Leaky abstraction artifact: We have to specifically
                    # exclude the "foo__in=[]" case from this handling, because
                    # it's short-circuited in the Where class.
                    # We also need to handle the case where a subquery is provided
                    entry = self.where_class()
                    entry.add((Constraint(alias, col, None), 'isnull', True), AND)
                    entry.negate()
                    self.where.add(entry, AND)

        if can_reuse is not None:
            can_reuse.update(join_list)
        if process_extras:
            for filter in extra_filters:
                self.add_filter(filter, negate=negate, can_reuse=can_reuse,
                        process_extras=False)

    def add_q(self, q_object, used_aliases=None):
        """
        Adds a Q-object to the current filter.

        Can also be used to add anything that has an 'add_to_query()' method.
        """
        if used_aliases is None:
            used_aliases = self.used_aliases
        if hasattr(q_object, 'add_to_query'):
            # Complex custom objects are responsible for adding themselves.
            q_object.add_to_query(self, used_aliases)
        else:
            if self.where and q_object.connector != AND and len(q_object) > 1:
                self.where.start_subtree(AND)
                subtree = True
            else:
                subtree = False
            connector = AND
            for child in q_object.children:
                if connector == OR:
                    refcounts_before = self.alias_refcount.copy()
                if isinstance(child, Node):
                    self.where.start_subtree(connector)
                    self.add_q(child, used_aliases)
                    self.where.end_subtree()
                else:
                    self.add_filter(child, connector, q_object.negated,
                            can_reuse=used_aliases)
                if connector == OR:
                    # Aliases that were newly added or not used at all need to
                    # be promoted to outer joins if they are nullable relations
                    # (they shouldn't turn the whole conditional into the empty
                    # set just because they don't match anything).
                    self.promote_unused_aliases(refcounts_before, used_aliases)
                connector = q_object.connector
            if q_object.negated:
                self.where.negate()
            if subtree:
                self.where.end_subtree()
        if self.filter_is_sticky:
            self.used_aliases = used_aliases

    def setup_joins(self, names, opts, alias, dupe_multis, allow_many=True,
            allow_explicit_fk=False, can_reuse=None, negate=False,
            process_extras=True):
        """
        Compute the necessary table joins for the passage through the fields
        given in 'names'. 'opts' is the Options class for the current model
        (which gives the table we are joining to), 'alias' is the alias for the
        table to start the joining from. If dupe_multis is True, any
        many-to-many or many-to-one joins will always create a new alias
        (necessary for disjunctive filters). If can_reuse is not None, it's a
        set of aliases that can be reused in these joins (nothing else can be
        reused in this case). Finally, 'negate' is used in the same sense as
        for add_filter() -- it indicates an exclude() filter, or something
        similar. It is only passed in here so that it can be passed to a
        field's extra_filter() for customised behaviour.

        Returns the final field involved in the join, the target database
        column (used for any 'where' constraint), the final 'opts' value and
        the list of tables joined.
        """
        joins = [alias]
        last = [0]
        dupe_set = set()
        exclusions = set()
        extra_filters = []
        for pos, name in enumerate(names):
            try:
                # 'int_alias' is only bound after a many-to-many join further
                # down; on earlier iterations the name doesn't exist yet.
                exclusions.add(int_alias)
            except NameError:
                pass
            exclusions.add(alias)
            last.append(len(joins))
            if name == 'pk':
                name = opts.pk.name
            try:
                field, model, direct, m2m = opts.get_field_by_name(name)
            except FieldDoesNotExist:
                for f in opts.fields:
                    if allow_explicit_fk and name == f.attname:
                        # XXX: A hack to allow foo_id to work in values() for
                        # backwards compatibility purposes. If we dropped that
                        # feature, this could be removed.
                        field, model, direct, m2m = opts.get_field_by_name(f.name)
                        break
                else:
                    names = opts.get_all_field_names() + self.aggregate_select.keys()
                    raise FieldError("Cannot resolve keyword %r into field. "
                            "Choices are: %s" % (name, ", ".join(names)))

            if not allow_many and (m2m or not direct):
                for alias in joins:
                    self.unref_alias(alias)
                raise MultiJoin(pos + 1)
            if model:
                # The field lives on a base class of the current model.
                # Skip the chain of proxy models up to the concrete proxied
                # model.
                proxied_model = get_proxied_model(opts)

                for int_model in opts.get_base_chain(model):
                    if int_model is proxied_model:
                        opts = int_model._meta
                    else:
                        lhs_col = opts.parents[int_model].column
                        dedupe = lhs_col in opts.duplicate_targets
                        if dedupe:
                            exclusions.update(self.dupe_avoidance.get(
                                    (id(opts), lhs_col), ()))
                            dupe_set.add((opts, lhs_col))
                        opts = int_model._meta
                        alias = self.join((alias, opts.db_table, lhs_col,
                                opts.pk.column), exclusions=exclusions)
                        joins.append(alias)
                        exclusions.add(alias)
                        for (dupe_opts, dupe_col) in dupe_set:
                            self.update_dupe_avoidance(dupe_opts, dupe_col,
                                    alias)
            cached_data = opts._join_cache.get(name)
            orig_opts = opts
            dupe_col = direct and field.column or field.field.column
            dedupe = dupe_col in opts.duplicate_targets
            if dupe_set or dedupe:
                if dedupe:
                    dupe_set.add((opts, dupe_col))
                exclusions.update(self.dupe_avoidance.get((id(opts), dupe_col),
                        ()))

            if process_extras and hasattr(field, 'extra_filters'):
                extra_filters.extend(field.extra_filters(names, pos, negate))
            if direct:
                if m2m:
                    # Many-to-many field defined on the current model.
                    if cached_data:
                        (table1, from_col1, to_col1, table2, from_col2,
                                to_col2, opts, target) = cached_data
                    else:
                        table1 = field.m2m_db_table()
                        from_col1 = opts.pk.column
                        to_col1 = field.m2m_column_name()
                        opts = field.rel.to._meta
                        table2 = opts.db_table
                        from_col2 = field.m2m_reverse_name()
                        to_col2 = opts.pk.column
                        target = opts.pk
                        orig_opts._join_cache[name] = (table1, from_col1,
                                to_col1, table2, from_col2, to_col2, opts,
                                target)

                    int_alias = self.join((alias, table1, from_col1, to_col1),
                            dupe_multis, exclusions, nullable=True,
                            reuse=can_reuse)
                    if int_alias == table2 and from_col2 == to_col2:
                        joins.append(int_alias)
                        alias = int_alias
                    else:
                        alias = self.join(
                                (int_alias, table2, from_col2, to_col2),
                                dupe_multis, exclusions, nullable=True,
                                reuse=can_reuse)
                        joins.extend([int_alias, alias])
                elif field.rel:
                    # One-to-one or many-to-one field
                    if cached_data:
                        (table, from_col, to_col, opts, target) = cached_data
                    else:
                        opts = field.rel.to._meta
                        target = field.rel.get_related_field()
                        table = opts.db_table
                        from_col = field.column
                        to_col = target.column
                        orig_opts._join_cache[name] = (table, from_col, to_col,
                                opts, target)

                    alias = self.join((alias, table, from_col, to_col),
                            exclusions=exclusions, nullable=field.null)
                    joins.append(alias)
                else:
                    # Non-relation fields.
                    target = field
                    break
            else:
                orig_field = field
                field = field.field
                if m2m:
                    # Many-to-many field defined on the target model.
                    if cached_data:
                        (table1, from_col1, to_col1, table2, from_col2,
                                to_col2, opts, target) = cached_data
                    else:
                        table1 = field.m2m_db_table()
                        from_col1 = opts.pk.column
                        to_col1 = field.m2m_reverse_name()
                        opts = orig_field.opts
                        table2 = opts.db_table
                        from_col2 = field.m2m_column_name()
                        to_col2 = opts.pk.column
                        target = opts.pk
                        orig_opts._join_cache[name] = (table1, from_col1,
                                to_col1, table2, from_col2, to_col2, opts,
                                target)

                    int_alias = self.join((alias, table1, from_col1, to_col1),
                            dupe_multis, exclusions, nullable=True,
                            reuse=can_reuse)
                    alias = self.join((int_alias, table2, from_col2, to_col2),
                            dupe_multis, exclusions, nullable=True,
                            reuse=can_reuse)
                    joins.extend([int_alias, alias])
                else:
                    # One-to-many field (ForeignKey defined on the target model)
                    if cached_data:
                        (table, from_col, to_col, opts, target) = cached_data
                    else:
                        local_field = opts.get_field_by_name(
                                field.rel.field_name)[0]
                        opts = orig_field.opts
                        table = opts.db_table
                        from_col = local_field.column
                        to_col = field.column
                        target = opts.pk
                        orig_opts._join_cache[name] = (table, from_col, to_col,
                                opts, target)

                    alias = self.join((alias, table, from_col, to_col),
                            dupe_multis, exclusions, nullable=True,
                            reuse=can_reuse)
                    joins.append(alias)

            for (dupe_opts, dupe_col) in dupe_set:
                try:
                    # Use the intermediate m2m alias, if one was created above.
                    self.update_dupe_avoidance(dupe_opts, dupe_col, int_alias)
                except NameError:
                    self.update_dupe_avoidance(dupe_opts, dupe_col, alias)

        if pos != len(names) - 1:
            if pos == len(names) - 2:
                raise FieldError("Join on field %r not permitted. Did you "
                        "misspell %r for the lookup type?" % (name,
                        names[pos + 1]))
            else:
                raise FieldError("Join on field %r not permitted." % name)

        return field, target, opts, joins, last, extra_filters

    def trim_joins(self, target, join_list, last, trim):
        """
        Sometimes joins at the end of a multi-table sequence can be trimmed. If
        the final join is against the same column as we are comparing against,
        and is an inner join, we can go back one step in the join chain and
        compare against the LHS of the join instead (and then repeat the
        optimization). The result, potentially, involves fewer table joins.

        The 'target' parameter is the final field being joined to, 'join_list'
        is the full list of join aliases.

        The 'last' list contains offsets into 'join_list', corresponding to
        each component of the filter. Many-to-many relations, for example, add
        two tables to the join list and we want to deal with both tables the
        same way, so 'last' has an entry for the first of the two tables and
        then the table immediately after the second table, in that case.

        The 'trim' parameter forces the final piece of the join list to be
        trimmed before anything. See the documentation of add_filter() for
        details about this.

        Returns the final active column and table alias and the new active
        join_list.
        """
        final = len(join_list)
        penultimate = last.pop()
        if penultimate == final:
            penultimate = last.pop()
        if trim and len(join_list) > 1:
            extra = join_list[penultimate:]
            join_list = join_list[:penultimate]
            final = penultimate
            penultimate = last.pop()
            col = self.alias_map[extra[0]][LHS_JOIN_COL]
            for alias in extra:
                self.unref_alias(alias)
        else:
            col = target.column
        alias = join_list[-1]
        while final > 1:
            join = self.alias_map[alias]
            if col != join[RHS_JOIN_COL] or join[JOIN_TYPE] != self.INNER:
                break
            self.unref_alias(alias)
            alias = join[LHS_ALIAS]
            col = join[LHS_JOIN_COL]
            join_list = join_list[:-1]
            final -= 1
            if final == penultimate:
                penultimate = last.pop()
        return col, alias, join_list

    def update_dupe_avoidance(self, opts, col, alias):
        """
        For a column that is one of multiple pointing to the same table, update
        the internal data structures to note that this alias shouldn't be used
        for those other columns.
        """
        ident = id(opts)
        for name in opts.duplicate_targets[col]:
            try:
                self.dupe_avoidance[ident, name].add(alias)
            except KeyError:
                self.dupe_avoidance[ident, name] = set([alias])

    def split_exclude(self, filter_expr, prefix, can_reuse):
        """
        When doing an exclude against any kind of N-to-many relation, we need
        to use a subquery. This method constructs the nested query, given the
        original exclude filter (filter_expr) and the portion up to the first
        N-to-many relation field.
        """
        query = Query(self.model, self.connection)
        query.add_filter(filter_expr, can_reuse=can_reuse)
        query.bump_prefix()
        query.clear_ordering(True)
        query.set_start(prefix)
        self.add_filter(('%s__in' % prefix, query), negate=True, trim=True,
                can_reuse=can_reuse)

        # If there's more than one join in the inner query (before any initial
        # bits were trimmed -- which means the last active table is more than
        # two places into the alias list), we need to also handle the
        # possibility that the earlier joins don't match anything by adding a
        # comparison to NULL (e.g. in
        # Tag.objects.exclude(parent__parent__name='t1'), a tag with no parent
        # would otherwise be overlooked).
        active_positions = [pos for (pos, count) in
                enumerate(query.alias_refcount.itervalues()) if count]
        if active_positions[-1] > 1:
            self.add_filter(('%s__isnull' % prefix, False), negate=True,
                    trim=True, can_reuse=can_reuse)

    def set_limits(self, low=None, high=None):
        """
        Adjusts the limits on the rows retrieved. We use low/high to set these,
        as it makes it more Pythonic to read and write. When the SQL query is
        created, they are converted to the appropriate offset and limit values.

        Any limits passed in here are applied relative to the existing
        constraints. So low is added to the current low value and both will be
        clamped to any existing high value.
        """
        if high is not None:
            if self.high_mark is not None:
                self.high_mark = min(self.high_mark, self.low_mark + high)
            else:
                self.high_mark = self.low_mark + high
        if low is not None:
            if self.high_mark is not None:
                self.low_mark = min(self.high_mark, self.low_mark + low)
            else:
                self.low_mark = self.low_mark + low

    def clear_limits(self):
        """
        Clears any existing limits.
        """
        self.low_mark, self.high_mark = 0, None

    def can_filter(self):
        """
        Returns True if adding filters to this instance is still possible.

        Typically, this means no limits or offsets have been put on the
        results.
        """
        return not self.low_mark and self.high_mark is None

    def clear_select_fields(self):
        """
        Clears the list of fields to select (but not extra_select columns).
        Some queryset types completely replace any existing list of select
        columns.
        """
        self.select = []
        self.select_fields = []

    def add_fields(self, field_names, allow_m2m=True):
        """
        Adds the given (model) fields to the select set. The field names are
        added in the order specified.
        """
        alias = self.get_initial_alias()
        opts = self.get_meta()

        try:
            for name in field_names:
                field, target, u2, joins, u3, u4 = self.setup_joins(
                        name.split(LOOKUP_SEP), opts, alias, False, allow_m2m,
                        True)
                final_alias = joins[-1]
                col = target.column
                if len(joins) > 1:
                    join = self.alias_map[final_alias]
                    if col == join[RHS_JOIN_COL]:
                        self.unref_alias(final_alias)
                        final_alias = join[LHS_ALIAS]
                        col = join[LHS_JOIN_COL]
                        joins = joins[:-1]
                self.promote_alias_chain(joins[1:])
                self.select.append((final_alias, col))
                self.select_fields.append(field)
        except MultiJoin:
            raise FieldError("Invalid field name: '%s'" % name)
        except FieldError:
            names = opts.get_all_field_names() + self.extra.keys() + self.aggregate_select.keys()
            names.sort()
            raise FieldError("Cannot resolve keyword %r into field. "
                    "Choices are: %s" % (name, ", ".join(names)))
        self.remove_inherited_models()

    def add_ordering(self, *ordering):
        """
        Adds items from the 'ordering' sequence to the query's "order by"
        clause. These items are either field names (not column names) --
        possibly with a direction prefix ('-' or '?') -- or ordinals,
        corresponding to column positions in the 'select' list.

        If 'ordering' is empty, all ordering is cleared from the query.
        """
        errors = []
        for item in ordering:
            if not ORDER_PATTERN.match(item):
                errors.append(item)
        if errors:
            raise FieldError('Invalid order_by arguments: %s' % errors)
        if ordering:
            self.order_by.extend(ordering)
        else:
            self.default_ordering = False

    def clear_ordering(self, force_empty=False):
        """
        Removes any ordering settings. If 'force_empty' is True, there will be
        no ordering in the resulting query (not even the model's default).
        """
        self.order_by = []
        self.extra_order_by = ()
        if force_empty:
            self.default_ordering = False

    def set_group_by(self):
        """
        Expands the GROUP BY clause required by the query.

        This will usually be the set of all non-aggregate fields in the
        return data. If the database backend supports grouping by the
        primary key, and the query would be equivalent, the optimization
        will be made automatically.
        """
        self.group_by = []
        if self.connection.features.allows_group_by_pk:
            if len(self.select) == len(self.model._meta.fields):
                self.group_by.append((self.model._meta.db_table,
                        self.model._meta.pk.column))
                return

        for sel in self.select:
            self.group_by.append(sel)

    def add_count_column(self):
        """
        Converts the query to do count(...) or count(distinct(pk)) in order to
        get its size.
        """
        if not self.distinct:
            if not self.select:
                count = self.aggregates_module.Count('*', is_summary=True)
            else:
                assert len(self.select) == 1, \
                        "Cannot add count col with multiple cols in 'select': %r" % self.select
                count = self.aggregates_module.Count(self.select[0])
        else:
            opts = self.model._meta
            if not self.select:
                count = self.aggregates_module.Count(
                        (self.join((None, opts.db_table, None, None)),
                        opts.pk.column), is_summary=True, distinct=True)
            else:
                # Because of SQL portability issues, multi-column, distinct
                # counts need a sub-query -- see get_count() for details.
                assert len(self.select) == 1, \
                        "Cannot add count col with multiple cols in 'select'."

                count = self.aggregates_module.Count(self.select[0],
                        distinct=True)
            # Distinct handling is done in Count(), so don't do it at this
            # level.
            self.distinct = False

        # Set the only aggregate to be the count column, and clear out the
        # select cache to reflect the new unmasked aggregates.
        self.aggregates = {None: count}
        self.set_aggregate_mask(None)
        self.group_by = None

    def add_select_related(self, fields):
        """
        Sets up the select_related data structure so that we only select
        certain related models (as opposed to all models, when
        self.select_related=True).
        """
        field_dict = {}
        for field in fields:
            d = field_dict
            for part in field.split(LOOKUP_SEP):
                d = d.setdefault(part, {})
        self.select_related = field_dict
        self.related_select_cols = []
        self.related_select_fields = []

    def add_extra(self, select, select_params, where, params, tables, order_by):
        """
        Adds data to the various extra_* attributes for user-created additions
        to the query.
        """
        if select:
            # We need to pair any placeholder markers in the 'select'
            # dictionary with their parameters in 'select_params' so that
            # subsequent updates to the select dictionary also adjust the
            # parameters appropriately.
            select_pairs = SortedDict()
            if select_params:
                param_iter = iter(select_params)
            else:
                param_iter = iter([])
            for name, entry in select.items():
                entry = force_unicode(entry)
                entry_params = []
                pos = entry.find("%s")
                while pos != -1:
                    entry_params.append(param_iter.next())
                    pos = entry.find("%s", pos + 2)
                select_pairs[name] = (entry, entry_params)
            # This is order preserving, since self.extra_select is a SortedDict.
            self.extra.update(select_pairs)
        if where:
            self.extra_where += tuple(where)
        if params:
            self.extra_params += tuple(params)
        if tables:
            self.extra_tables += tuple(tables)
        if order_by:
            self.extra_order_by = order_by

    def clear_deferred_loading(self):
        """
        Remove any fields from the deferred loading set.
        """
        self.deferred_loading = (set(), True)

    def add_deferred_loading(self, field_names):
        """
        Add the given list of model field names to the set of fields to
        exclude from loading from the database when automatic column selection
        is done. The new field names are added to any existing field names that
        are deferred (or removed from any existing field names that are marked
        as the only ones for immediate loading).
        """
        # Fields on related models are stored in the literal double-underscore
        # format, so that we can use a set datastructure. We do the foo__bar
        # splitting and handling when computing the SQL column names (as part
        # of get_columns()).
        existing, defer = self.deferred_loading
        if defer:
            # Add to existing deferred names.
            self.deferred_loading = existing.union(field_names), True
        else:
            # Remove names from the set of any existing "immediate load" names.
            self.deferred_loading = existing.difference(field_names), False

    def add_immediate_loading(self, field_names):
        """
        Add the given list of model field names to the set of fields to
        retrieve when the SQL is executed ("immediate loading" fields). The
        field names replace any existing immediate loading field names. If
        there are field names already specified for deferred loading, those
        names are removed from the new field_names before storing the new names
        for immediate loading. (That is, immediate loading overrides any
        existing immediate values, but respects existing deferrals.)
        """
        existing, defer = self.deferred_loading
        if defer:
            # Remove any existing deferred names from the current set before
            # setting the new names.
            self.deferred_loading = set(field_names).difference(existing), False
        else:
            # Replace any existing "immediate load" field names.
            self.deferred_loading = set(field_names), False

    def get_loaded_field_names(self):
        """
        If any fields are marked to be deferred, returns a dictionary mapping
        each model to the set of field names that will be loaded for it. If a
        model is not in the returned dictionary, none of its fields are
        deferred.

        If no fields are marked for deferral, returns an empty dictionary.
        """
        collection = {}
        self.deferred_to_data(collection, self.get_loaded_field_names_cb)
        return collection

    def get_loaded_field_names_cb(self, target, model, fields):
        """
        Callback used by get_loaded_field_names().
        """
        target[model] = set([f.name for f in fields])

    def set_aggregate_mask(self, names):
        """
        Set the mask of aggregates that will actually be returned by the
        SELECT.
        """
        if names is None:
            self.aggregate_select_mask = None
        else:
            self.aggregate_select_mask = set(names)
        self._aggregate_select_cache = None

    def set_extra_mask(self, names):
        """
        Set the mask of extra select items that will be returned by SELECT.
        We don't actually remove them from the Query since they might be used
        later.
        """
        if names is None:
            self.extra_select_mask = None
        else:
            self.extra_select_mask = set(names)
        self._extra_select_cache = None

    def _aggregate_select(self):
        """
        The SortedDict of aggregate columns that are not masked, and should be
        used in the SELECT clause.

        This result is cached for optimization purposes.
        """
        if self._aggregate_select_cache is not None:
            return self._aggregate_select_cache
        elif self.aggregate_select_mask is not None:
            self._aggregate_select_cache = SortedDict([
                (k, v) for k, v in self.aggregates.items()
                if k in self.aggregate_select_mask
            ])
            return self._aggregate_select_cache
        else:
            return self.aggregates
    aggregate_select = property(_aggregate_select)

    def _extra_select(self):
        """
        The SortedDict of extra select columns that are not masked, and should
        be used in the SELECT clause. Cached like _aggregate_select above.
        """
        if self._extra_select_cache is not None:
            return self._extra_select_cache
        elif self.extra_select_mask is not None:
            self._extra_select_cache = SortedDict([
                (k, v) for k, v in self.extra.items()
                if k in self.extra_select_mask
            ])
            return self._extra_select_cache
        else:
            return self.extra
    extra_select = property(_extra_select)

    def set_start(self, start):
        """
        Sets the table from which to start joining. The start position is
        specified by the related attribute from the base model. This will
        automatically set the select column to be the column linked from the
        previous table.

        This method is primarily for internal use and the error checking isn't
        as friendly as add_filter(). Mostly useful for querying directly
        against the join table of a many-to-many relation in a subquery.
        """
        opts = self.model._meta
        alias = self.get_initial_alias()
        field, col, opts, joins, last, extra = self.setup_joins(
                start.split(LOOKUP_SEP), opts, alias, False)
        select_col = self.alias_map[joins[1]][LHS_JOIN_COL]
        select_alias = alias

        # The call to setup_joins added an extra reference to everything in
        # joins. Reverse that.
        for alias in joins:
            self.unref_alias(alias)

        # We might be able to trim some joins from the front of this query,
        # providing that we only traverse "always equal" connections (i.e. rhs
        # is *always* the same value as lhs).
        for alias in joins[1:]:
            join_info = self.alias_map[alias]
            if (join_info[LHS_JOIN_COL] != select_col
                    or join_info[JOIN_TYPE] != self.INNER):
                break
            self.unref_alias(select_alias)
            select_alias = join_info[RHS_ALIAS]
            select_col = join_info[RHS_JOIN_COL]
        self.select = [(select_alias, select_col)]
        self.remove_inherited_models()

    def execute_sql(self, result_type=MULTI):
        """
        Runs the query against the database and returns the result(s). The
        return value is a single data item if result_type is SINGLE, or an
        iterator over the results if the result_type is MULTI.

        result_type is either MULTI (use fetchmany() to retrieve all rows),
        SINGLE (only retrieve a single row), or None. In this last case, the
        cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
        is needed, as the filters describe an empty set. In that case, None is
        returned, to avoid any unnecessary database interaction.
        """
        try:
            sql, params = self.as_sql()
            if not sql:
                raise EmptyResultSet
        except EmptyResultSet:
            if result_type == MULTI:
                return empty_iter()
            else:
                return
        cursor = self.connection.cursor()
        cursor.execute(sql, params)

        if not result_type:
            return cursor
        if result_type == SINGLE:
            if self.ordering_aliases:
                return cursor.fetchone()[:-len(self.ordering_aliases)]
            return cursor.fetchone()

        # The MULTI case.
        if self.ordering_aliases:
            result = order_modified_iter(cursor, len(self.ordering_aliases),
                    self.connection.features.empty_fetchmany_value)
        else:
            result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
                    self.connection.features.empty_fetchmany_value)
        if not self.connection.features.can_use_chunked_reads:
            # If we are using non-chunked reads, we return the same data
            # structure as normally, but ensure it is all read into memory
            # before going any further.
            return list(result)
        return result

# Use the backend's custom Query class if it defines one. Otherwise, use the
# default.
if connection.features.uses_custom_query_class:
    Query = connection.ops.query_class(BaseQuery)
else:
    Query = BaseQuery

def get_order_dir(field, default='ASC'):
    """
    Returns the field name and direction for an order specification. For
    example, '-foo' is returned as ('foo', 'DESC').

    The 'default' param is used to indicate which way no prefix (or a '+'
    prefix) should sort. The '-' prefix always sorts the opposite way.
    """
    dirn = ORDER_DIR[default]
    if field[0] == '-':
        return field[1:], dirn[1]
    return field, dirn[0]

def empty_iter():
    """
    Returns an iterator containing no results.
    """
    # Calling next() on an already-exhausted iterator raises StopIteration
    # immediately, so this generator terminates before yielding anything.
    yield iter([]).next()

def order_modified_iter(cursor, trim, sentinel):
    """
    Yields blocks of rows from a cursor. We use this iterator in the special
    case when extra output columns have been added to support ordering
    requirements. We must trim those extra columns before anything else can use
    the results, since they're only needed to make the SQL valid.
    """
    for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
            sentinel):
        yield [r[:-trim] for r in rows]

def setup_join_cache(sender, **kwargs):
    """
    The information needed to join between model fields is something that is
    invariant over the life of the model, so we cache it in the model's
    Options class, rather than recomputing it all the time.

    This method initialises the (empty) cache when the model is created.
    """
    sender._meta._join_cache = {}

signals.class_prepared.connect(setup_join_cache)

def add_to_dict(data, key, value):
    """
    A helper function to add "value" to the set of values for "key", whether or
    not "key" already exists.
    """
    if key in data:
        data[key].add(value)
    else:
        data[key] = set([value])

def get_proxied_model(opts):
    """
    Walks the proxy chain for the given Options object and returns the
    concrete model being proxied (or None if 'opts' is not for a proxy model).
    """
    int_opts = opts
    proxied_model = None
    while int_opts.proxy:
        proxied_model = int_opts.proxy_for_model
        int_opts = proxied_model._meta
    return proxied_model