#!/usr/bin/env python
# coding=utf-8

from lxml import etree
from models import setup_database
from optparse import OptionParser #@UnresolvedImport
from sqlalchemy import Table, Column, BigInteger
from utils import (parse_date, set_logging_options, set_logging,
                   get_filter_query, get_logger)
import anyjson
import datetime
import httplib2
import os.path
import re
import sys
import time
import uuid #@UnresolvedImport
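# Example invocation (a sketch: the script file name and the database/project
# paths are placeholders, and the date format is whatever utils.parse_date
# accepts):
#   python export_tweets.py -d tweets.db -I project.ldt \
#       -s "2011-06-01T14:00:00" -D 3600 -H iri -E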
|
#class TweetExclude(object):
#    def __init__(self, id):
#        self.id = id
#
#    def __repr__(self):
#        return "<TweetExclude(id=%d)>" % (self.id)

|
def parse_polemics(tw, extended_mode):
    """
    Parse polemics in the tweet text and return a list of polemic codes,
    or None if no polemic is found.
    """
    polemics = {}
    for m in re.finditer(r"(\+\+|\-\-|\?\?|\=\=)", tw.text):
        pol_link = {
            '++': u'OK',
            '--': u'KO',
            '??': u'Q',
            '==': u'REF'}[m.group(1)]
        polemics[pol_link] = pol_link

    if extended_mode:
        if "?" in tw.text:
            polemics["Q"] = "Q"

        for entity in tw.entity_list:
            if entity.type == "entity_url":
                polemics["REF"] = "REF"

    if len(polemics) > 0:
        return polemics.keys()
    else:
        return None
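# Example: for a tweet whose text is "++ well argued, but ?? the source",
# parse_polemics returns ["OK", "Q"] (order not guaranteed, since the codes
# are deduplicated through a dict).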
|
def get_options():
    parser = OptionParser()
    parser.add_option("-f", "--file", dest="filename",
                      help="write export to file", metavar="FILE", default="project.ldt")
    parser.add_option("-d", "--database", dest="database",
                      help="input database", metavar="DATABASE")
    parser.add_option("-s", "--start-date", dest="start_date",
                      help="start date", metavar="START_DATE", default=None)
    parser.add_option("-e", "--end-date", dest="end_date",
                      help="end date", metavar="END_DATE", default=None)
    parser.add_option("-I", "--content-file", dest="content_file",
                      help="content file", metavar="CONTENT_FILE")
    parser.add_option("-c", "--content", dest="content",
                      help="content url", metavar="CONTENT")
    parser.add_option("-V", "--video-url", dest="video",
                      help="video url", metavar="VIDEO")
    parser.add_option("-i", "--content-id", dest="content_id",
                      help="content id", metavar="CONTENT_ID")
    parser.add_option("-x", "--exclude", dest="exclude",
                      help="file containing the tweet ids to exclude", metavar="EXCLUDE")
    parser.add_option("-C", "--color", dest="color",
                      help="color code", metavar="COLOR", default="16763904")
    parser.add_option("-H", "--hashtag", dest="hashtag",
                      help="hashtag", metavar="HASHTAG", default=[], action="append")
    parser.add_option("-D", "--duration", dest="duration", type="int",
                      help="duration", metavar="DURATION", default=None)
    parser.add_option("-n", "--name", dest="name",
                      help="cutting name", metavar="NAME", default=u"Tweets")
    parser.add_option("-R", "--replace", dest="replace", action="store_true",
                      help="replace existing tweet ensembles", metavar="REPLACE", default=False)
    parser.add_option("-m", "--merge", dest="merge", action="store_true",
                      help="merge into the existing tweet ensemble (the first one found)", metavar="MERGE", default=False)
    parser.add_option("-L", "--list-conf", dest="listconf",
                      help="XML file listing the files to process", metavar="LIST_CONF", default=None)
    parser.add_option("-E", "--extended", dest="extended_mode", action="store_true",
                      help="trigger polemic extended mode", metavar="EXTENDED", default=False)
    parser.add_option("--user-whitelist", dest="user_whitelist", action="store",
                      help="file containing user screen names, one per line", metavar="USER_WHITELIST", default=None)

    set_logging_options(parser)

    return parser.parse_args() + (parser,)
|
if __name__ == "__main__":

    (options, args, parser) = get_options()

    set_logging(options)

    get_logger().debug("OPTIONS : " + repr(options)) #@UndefinedVariable

    if len(sys.argv) == 1 or options.database is None:
        parser.print_help()
        sys.exit(1)

    conn_str = options.database.strip()
    if not re.match(r"^\w+://.+", conn_str):
        conn_str = 'sqlite:///' + conn_str

    engine, metadata, Session = setup_database(conn_str, echo=((options.verbose - options.quiet) > 0), create_all=False)
|
    conn = None
    try:
        conn = engine.connect()
        session = None
        try:
            session = Session(bind=conn)
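            # Excluded tweet ids go into a TEMPORARY table so that
            # get_filter_query can join against it; the table disappears
            # when the connection closes.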
|
            tweet_exclude_table = Table("tweet_exclude", metadata, Column('id', BigInteger, primary_key=True), prefixes=['TEMPORARY'])
            #mapper(TweetExclude, tweet_exclude_table)
            metadata.create_all(bind=conn, tables=[tweet_exclude_table])

            if options.exclude and os.path.exists(options.exclude):
                with open(options.exclude, 'r') as f:
                    tei = tweet_exclude_table.insert()
                    for line in f:
                        conn.execute(tei.values(id=long(line.strip())))
|
            user_whitelist_file = options.user_whitelist
            user_whitelist = None
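            # The list configuration is an XML file whose expected shape is
            # inferred from the parsing below (a sketch, not a schema):
            #   <twitter_export>
            #     <file>
            #       <path>...</path>
            #       <start_date>...</start_date>
            #       <end_date>...</end_date>
            #       <duration>...</duration>
            #       <hashtags>...</hashtags>
            #     </file>
            #   </twitter_export>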
|
            if options.listconf:

                parameters = []
                confdoc = etree.parse(options.listconf)
                for node in confdoc.xpath("/twitter_export/file"):
                    params = {}
                    for snode in node:
                        if snode.tag == "path":
                            params['content_file'] = snode.text
                        elif snode.tag == "start_date":
                            params['start_date'] = snode.text
                        elif snode.tag == "end_date":
                            params['end_date'] = snode.text
                        elif snode.tag == "duration":
                            params['duration'] = int(snode.text)
                        elif snode.tag == "hashtags":
                            params['hashtags'] = [snode.text]
                    if options.hashtag or 'hashtags' not in params:
                        params['hashtags'] = options.hashtag
                    parameters.append(params)
            else:
                parameters = [{
                    'start_date': options.start_date,
                    'end_date': options.end_date,
                    'duration': options.duration,
                    'content_file': options.content_file,
                    'hashtags': options.hashtag
                }]
|

            for params in parameters:

                get_logger().debug("PARAMETERS " + repr(params)) #@UndefinedVariable

                start_date_str = params.get("start_date", None)
                end_date_str = params.get("end_date", None)
                duration = params.get("duration", None)
                content_file = params.get("content_file", None)
                hashtags = params.get('hashtags', [])

                if user_whitelist_file:
                    with open(user_whitelist_file, 'r') as f:
                        user_whitelist = list(set([s.strip() for s in f]))
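                # ts is the reference timestamp: each element's begin is
                # expressed in milliseconds relative to it (it falls back to
                # the first tweet's timestamp if no start date is given).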
|
                start_date = None
                ts = None
                if start_date_str:
                    start_date = parse_date(start_date_str)
                    ts = time.mktime(start_date.timetuple())

                end_date = None
                if end_date_str:
                    end_date = parse_date(end_date_str)
                elif start_date and duration:
                    end_date = start_date + datetime.timedelta(seconds=duration)
|
                query = get_filter_query(session, start_date, end_date, hashtags, tweet_exclude_table, user_whitelist)

                query_res = query.all()

                root = None
                ensemble_parent = None

                # TODO: analyse the situation: ldt or iri? filename set or not?
|
                if content_file and content_file.startswith("http"):

                    get_logger().debug("url : " + content_file) #@UndefinedVariable

                    h = httplib2.Http()
                    resp, content = h.request(content_file)

                    get_logger().debug("url response " + repr(resp) + " content " + repr(content)) #@UndefinedVariable

                    project = anyjson.deserialize(content)
                    root = etree.fromstring(project["ldt"])

                elif content_file and os.path.exists(content_file):

                    doc = etree.parse(content_file)
                    root = doc.getroot()
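                # No source document was found or fetched: build a fresh <iri>
                # skeleton with project, media and annotations nodes.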
|
                if root is None:

                    root = etree.Element(u"iri")

                    project = etree.SubElement(root, u"project", {u"abstract": u"Polemics Tweets", u"title": u"Polemic Tweets", u"user": u"IRI Web", u"id": unicode(uuid.uuid4())})

                    medias = etree.SubElement(root, u"medias")
                    media = etree.SubElement(medias, u"media", {u"pict": u"", u"src": unicode(options.content), u"video": unicode(options.video), u"id": unicode(options.content_id), u"extra": u""})

                    annotations = etree.SubElement(root, u"annotations")
                    content = etree.SubElement(annotations, u"content", {u"id": unicode(options.content_id)})
                    ensemble_parent = content
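                # Tell the two document flavours apart by their first child:
                # an ldt project starts with <project>, a raw iri document
                # with <head>.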
|
                if ensemble_parent is None:
                    file_type = None
                    for node in root:
                        if node.tag == "project":
                            file_type = "ldt"
                            break
                        elif node.tag == "head":
                            file_type = "iri"
                            break

                    if file_type == "ldt":
                        media_nodes = root.xpath("//media")
                        if len(media_nodes) > 0:
                            media = media_nodes[0]
                        annotations_node = root.find(u"annotations")
                        if annotations_node is None:
                            annotations_node = etree.SubElement(root, u"annotations")
                        content_node = annotations_node.find(u"content")
                        if content_node is None:
                            content_node = etree.SubElement(annotations_node, u"content", id=media.get(u"id"))
                        ensemble_parent = content_node
                    elif file_type == "iri":
                        body_node = root.find(u"body")
                        if body_node is None:
                            body_node = etree.SubElement(root, u"body")
                        ensembles_node = body_node.find(u"ensembles")
                        if ensembles_node is None:
                            ensembles_node = etree.SubElement(body_node, u"ensembles")
                        ensemble_parent = ensembles_node
|
262 get_logger().error("Can not process file") #@UndefinedVariable |
|
263 sys.exit() |
|
264 |
|
265 if options.replace: |
|
266 for ens in ensemble_parent.iterchildren(tag=u"ensemble"): |
|
267 if ens.get("id","").startswith("tweet_"): |
|
268 ensemble_parent.remove(ens) |
|
269 |
|
                ensemble = None
                elements = None

                if options.merge:
                    ensemble = ensemble_parent.find(u"ensemble")
                    if ensemble is not None:
                        elements = ensemble.find(u".//elements")

                if ensemble is None or elements is None:
                    ensemble = etree.SubElement(ensemble_parent, u"ensemble", {u"id": u"tweet_" + unicode(uuid.uuid4()), u"title": u"Ensemble Twitter", u"author": u"IRI Web", u"abstract": u"Ensemble Twitter"})
                    decoupage = etree.SubElement(ensemble, u"decoupage", {u"id": unicode(uuid.uuid4()), u"author": u"IRI Web"})

                    etree.SubElement(decoupage, u"title").text = unicode(options.name)
                    etree.SubElement(decoupage, u"abstract").text = unicode(options.name)

                    elements = etree.SubElement(decoupage, u"elements")
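                # Each tweet becomes an <element>: begin is its offset from ts
                # in milliseconds, dur is 0, and the author's avatar URL goes
                # in src.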
|
                for tw in query_res:
                    tweet_ts_dt = tw.created_at
                    tweet_ts = int(time.mktime(tweet_ts_dt.timetuple()))
                    if ts is None:
                        ts = tweet_ts
                    tweet_ts_rel = (tweet_ts - ts) * 1000
                    username = None
                    profile_url = ""
                    if tw.user is not None:
                        username = tw.user.name
                        profile_url = tw.user.profile_image_url if tw.user.profile_image_url is not None else ""
                    if not username:
                        username = "anon."

                    element = etree.SubElement(elements, u"element", {u"id": unicode(uuid.uuid4()) + u"-" + unicode(tw.id), u"color": unicode(options.color), u"author": unicode(username), u"date": unicode(tweet_ts_dt.strftime("%Y/%m/%d")), u"begin": unicode(tweet_ts_rel), u"dur": u"0", u"src": unicode(profile_url)})
                    etree.SubElement(element, u"title").text = unicode(username) + u": " + unicode(tw.text)
                    etree.SubElement(element, u"abstract").text = unicode(tw.text)

                    tags_node = etree.SubElement(element, u"tags")

                    for entity in tw.entity_list:
                        if entity.type == u'entity_hashtag':
                            etree.SubElement(tags_node, u"tag").text = entity.hashtag.text

                    meta_element = etree.SubElement(element, u'meta')

                    polemics_list = parse_polemics(tw, options.extended_mode)
                    if polemics_list:
                        polemics_element = etree.Element(u'polemics')
                        for pol in polemics_list:
                            etree.SubElement(polemics_element, u'polemic').text = pol
                        meta_element.append(polemics_element)

                    etree.SubElement(meta_element, u"source", attrib={"url": u"http://dev.twitter.com", "mimetype": u"application/json"}).text = etree.CDATA(unicode(tw.tweet_source.original_json))
|
                # when merging, sort the elements by timecode: pull them out,
                # sort on the begin attribute, and put them back
                if options.merge:
                    elements[:] = sorted(elements, key=lambda n: int(n.get('begin')))
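                # Serialize the document and write it back: PUT the project to
                # its source URL when the content came over HTTP, otherwise
                # write a local file (the source file if it exists, else -f).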
|
                output_data = etree.tostring(root, encoding="utf-8", method="xml", pretty_print=True, xml_declaration=True)

                if content_file and content_file.startswith("http"):

                    project["ldt"] = output_data
                    body = anyjson.serialize(project)
                    get_logger().debug("write http " + content_file) #@UndefinedVariable
                    get_logger().debug("write http " + repr(body)) #@UndefinedVariable
                    h = httplib2.Http()
                    resp, content = h.request(content_file, "PUT", headers={'content-type': 'application/json'}, body=body)
                    get_logger().debug("write http " + repr(resp) + " content " + repr(content)) #@UndefinedVariable
                else:
                    if content_file and os.path.exists(content_file):
                        dest_file_name = content_file
                    else:
                        dest_file_name = options.filename

                    get_logger().debug("WRITE : " + dest_file_name) #@UndefinedVariable
                    with open(dest_file_name, "w") as output:
                        output.write(output_data)

        finally:
            if session:
                session.close()
    finally:
        if conn:
            conn.close()
|