"""
Query subclasses which provide extra functionality beyond simple data retrieval.
"""

from django.core.exceptions import FieldError
from django.db import connections
from django.db.models.sql.constants import *
from django.db.models.sql.datastructures import Date
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.query import Query
from django.db.models.sql.where import AND, Constraint

__all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'DateQuery',
        'AggregateQuery']

class DeleteQuery(Query):
    """
    Delete queries are done through this class, since they are more constrained
    than general queries.
    """

    compiler = 'SQLDeleteCompiler'

    def do_query(self, table, where, using):
        self.tables = [table]
        self.where = where
        self.get_compiler(using).execute_sql(None)

    def delete_batch(self, pk_list, using):
        """
        Set up and execute delete queries for all the objects in pk_list.

        More than one physical query may be executed if there are a
        lot of values in pk_list.
        """
        for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
            where = self.where_class()
            field = self.model._meta.pk
            where.add((Constraint(None, field.column, field), 'in',
                    pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE]), AND)
            self.do_query(self.model._meta.db_table, where, using=using)
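    # A minimal usage sketch, not part of the original module: ``Entry`` is a
    # hypothetical model and 'default' the database alias. QuerySet.delete()
    # is the real caller; this only illustrates how delete_batch() chunks the
    # primary keys.
    #
    #     pk_list = list(Entry.objects.values_list('pk', flat=True))
    #     query = DeleteQuery(Entry)
    #     query.delete_batch(pk_list, using='default')
    #
    # With more primary keys than GET_ITERATOR_CHUNK_SIZE, several
    # "DELETE ... WHERE pk IN (...)" statements are executed instead of one.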
|

class UpdateQuery(Query):
    """
    Represents an "update" SQL query.
    """

    compiler = 'SQLUpdateCompiler'

    def __init__(self, *args, **kwargs):
        super(UpdateQuery, self).__init__(*args, **kwargs)
        self._setup_query()

    def _setup_query(self):
        """
        Runs on initialization and after cloning. Any attributes that would
        normally be set in __init__ should go in here, instead, so that they
        are also set up after a clone() call.
        """
        self.values = []
        self.related_ids = None
        if not hasattr(self, 'related_updates'):
            self.related_updates = {}

    def clone(self, klass=None, **kwargs):
        return super(UpdateQuery, self).clone(klass,
                related_updates=self.related_updates.copy(), **kwargs)

    def clear_related(self, related_field, pk_list, using):
        """
        Set up and execute an update query that clears related entries for the
        keys in pk_list.

        This is used by the QuerySet.delete_objects() method.
        """
        for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
            self.where = self.where_class()
            f = self.model._meta.pk
            self.where.add((Constraint(None, f.column, f), 'in',
                    pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE]),
                    AND)
            self.values = [(related_field, None, None)]
            self.get_compiler(using).execute_sql(None)
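    # A hedged sketch with hypothetical names: given ``Entry.sponsor``, a
    # nullable ForeignKey, clear_related() sets that column to NULL for the
    # rows of this model whose primary keys are in pk_list, one chunk at a
    # time.
    #
    #     sponsor_field = Entry._meta.get_field('sponsor')
    #     query = UpdateQuery(Entry)
    #     query.clear_related(sponsor_field, entry_pk_list, using='default')
    #
    # Each chunk executes roughly
    # "UPDATE ... SET sponsor_id = NULL WHERE id IN (...)".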
|

    def add_update_values(self, values):
        """
        Convert a dictionary of field name to value mappings into an update
        query. This is the entry point for the public update() method on
        querysets.
        """
        values_seq = []
        for name, val in values.iteritems():
            field, model, direct, m2m = self.model._meta.get_field_by_name(name)
            if not direct or m2m:
                raise FieldError('Cannot update model field %r (only non-relations and foreign keys permitted).' % field)
            if model:
                self.add_related_update(model, field, val)
                continue
            values_seq.append((field, model, val))
        return self.add_update_fields(values_seq)
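    # A hedged sketch of the entry point described above; ``Entry`` and its
    # ``status`` field are hypothetical. The public API is simply:
    #
    #     Entry.objects.filter(status='draft').update(status='published')
    #
    # which is approximately equivalent to driving this class directly:
    #
    #     query = UpdateQuery(Entry)
    #     query.add_filter(('status__exact', 'draft'))
    #     query.add_update_values({'status': 'published'})
    #     query.get_compiler('default').execute_sql(None)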
|

    def add_update_fields(self, values_seq):
        """
        Turn a sequence of (field, model, value) triples into an update query.
        Used by add_update_values() as well as the "fast" update path when
        saving models.
        """
        self.values.extend(values_seq)

    def add_related_update(self, model, field, value):
        """
        Adds (name, value) to an update query for an ancestor model.

        Updates are coalesced so that we only run one update query per ancestor.
        """
        try:
            self.related_updates[model].append((field, None, value))
        except KeyError:
            self.related_updates[model] = [(field, None, value)]

    def get_related_updates(self):
        """
        Returns a list of query objects: one for each update required to an
        ancestor model. Each query will have the same filtering conditions as
        the current query but will only update a single table.
        """
        if not self.related_updates:
            return []
        result = []
        for model, values in self.related_updates.iteritems():
            query = UpdateQuery(model)
            query.values = values
            if self.related_ids is not None:
                query.add_filter(('pk__in', self.related_ids))
            result.append(query)
        return result
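    # A hedged illustration of the ancestor handling above, assuming a
    # hypothetical multi-table inheritance pair ``Place`` -> ``Restaurant``
    # where ``name`` lives on Place:
    #
    #     Restaurant.objects.filter(serves_pizza=True).update(name='Renamed')
    #
    # add_update_values() routes ``name`` through add_related_update(), and
    # the update compiler later calls get_related_updates() to run one extra
    # UPDATE against the Place table, filtered by the same primary keys
    # (related_ids), so exactly one query runs per ancestor table.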
|

class InsertQuery(Query):
    compiler = 'SQLInsertCompiler'

    def __init__(self, *args, **kwargs):
        super(InsertQuery, self).__init__(*args, **kwargs)
        self.columns = []
        self.values = []
        self.params = ()

    def clone(self, klass=None, **kwargs):
        extras = {
            'columns': self.columns[:],
            'values': self.values[:],
            'params': self.params
        }
        extras.update(kwargs)
        return super(InsertQuery, self).clone(klass, **extras)

    def insert_values(self, insert_values, raw_values=False):
        """
        Set up the insert query from the 'insert_values' sequence of
        (field, value) pairs, which gives the model fields and their target
        values.

        If 'raw_values' is True, the values in 'insert_values' are inserted
        directly into the query, rather than passed as SQL parameters. This
        provides a way to insert NULL and DEFAULT keywords into the query,
        for example.
        """
        placeholders, values = [], []
        for field, val in insert_values:
            placeholders.append((field, val))
            self.columns.append(field.column)
            values.append(val)
        if raw_values:
            self.values.extend([(None, v) for v in values])
        else:
            self.params += tuple(values)
            self.values.extend(placeholders)
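    # A hedged sketch of how an InsertQuery is normally populated; ``Entry``
    # and ``entry`` are hypothetical, and AutoField comes from
    # django.db.models. In this era the real caller is Model.save_base(),
    # which also runs each value through the field's get_db_prep_save().
    #
    #     fields = [f for f in Entry._meta.local_fields
    #               if not isinstance(f, AutoField)]
    #     query = InsertQuery(Entry)
    #     query.insert_values([(f, getattr(entry, f.attname)) for f in fields])
    #     query.get_compiler('default').execute_sql(return_id=False)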
|

class DateQuery(Query):
    """
    A DateQuery is a normal query, except that it specifically selects a single
    date field. This requires some special handling when converting the results
    back to Python objects, so we put it in a separate class.
    """

    compiler = 'SQLDateCompiler'

    def add_date_select(self, field, lookup_type, order='ASC'):
        """
        Converts the query into a date extraction query.
        """
        result = self.setup_joins([field.name], self.get_meta(),
                self.get_initial_alias(), False)
        alias = result[3][-1]
        select = Date((alias, field.column), lookup_type)
        self.select = [select]
        self.select_fields = [None]
        self.select_related = False # See #7097.
        self.set_extra_mask([])
        self.distinct = True
        self.order_by = order == 'ASC' and [1] or [-1]
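    # A hedged sketch of the public API that ends up here; ``Entry`` with a
    # ``pub_date`` DateField is a hypothetical model:
    #
    #     Entry.objects.dates('pub_date', 'month', order='DESC')
    #
    # QuerySet.dates() clones the query as a DateQuery and calls
    # add_date_select() with the resolved field and the 'month' lookup,
    # returning one distinct, truncated date per month that has entries.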
|

class AggregateQuery(Query):
    """
    An AggregateQuery takes another query as a parameter to the FROM
    clause and only selects the elements in the provided list.
    """

    compiler = 'SQLAggregateCompiler'

    def add_subquery(self, query, using):
        # Compile the inner query once and stash its SQL and parameters; the
        # compiler then wraps it as the FROM clause of the aggregate query.
        self.subquery, self.sub_params = query.get_compiler(using).as_sql(with_col_aliases=True)
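    # A hedged sketch of where this is used; ``Book`` with a many-to-many
    # ``authors`` field is hypothetical, mirroring the docs example.
    # Aggregating over an annotation forces a GROUP BY, so
    # Query.get_aggregation() wraps the original query via add_subquery():
    #
    #     from django.db.models import Avg, Count
    #     Book.objects.annotate(n=Count('authors')).aggregate(Avg('n'))
    #
    # The compiler then emits roughly
    # "SELECT AVG(n) FROM (SELECT ..., COUNT(...) AS n ... GROUP BY ...) subquery".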