|
1 #!/usr/bin/env python |
|
2 # coding=utf-8 |
|
3 |
|
4 import argparse |
|
5 import bisect |
|
6 import datetime |
|
7 import json |
|
8 import os.path |
|
9 import re |
|
10 import sys |
|
11 import uuid # @UnresolvedImport |
|
12 |
|
13 import requests |
|
14 |
|
15 import dateutil.tz |
|
16 from dateutil.parser import parse as parse_date_raw |
|
17 from dateutil.tz import tzutc |
|
18 from iri_tweet.utils import get_logger, set_logging, set_logging_options |
|
19 from lxml import etree |
|
20 |
|
21 #class TweetExclude(object): |
|
22 # def __init__(self, id): |
|
23 # self.id = id |
|
24 # |
|
25 # def __repr__(self): |
|
26 # return "<TweetExclude(id=%d)>" % (self.id) |
|
27 |
|
# REST API path fragments; appended to the platform base URL (options.base_url)
# to build the contents / projects endpoint URLs.
LDT_CONTENT_REST_API_PATH = "api/ldt/1.0/contents/"
LDT_PROJECT_REST_API_PATH = "api/ldt/1.0/projects/"
|
30 |
|
def parse_date(datestr, default_tz, default=None):
    """Parse *datestr* and guarantee a timezone-aware result.

    *default* is forwarded to the dateutil parser to fill in missing
    date components; when the parsed value is naive it is stamped with
    *default_tz*.
    """
    parsed = parse_date_raw(datestr, default=default)
    return parsed if parsed.tzinfo is not None else parsed.replace(tzinfo=default_tz)
|
36 |
|
37 |
|
def re_fn(expr, item):
    """Case-insensitive regexp predicate: True when *expr* matches inside *item*."""
    match = re.search(expr, item, re.I)
    if match:
        get_logger().debug("re_fn : " + repr(expr) + "~" + repr(item)) #@UndefinedVariable
    return match is not None
|
44 |
|
def parse_polemics_1(tw_text, extended_mode):
    """
    parse polemics in text and return a list of polemic code. None if not polemic found
    """
    # protocol v1 markers: ++ approval, -- disapproval, ?? question, == reference
    marker_to_code = {'++': 'OK', '--': 'KO', '??': 'Q', '==': 'REF'}
    found = {}
    for marker_match in re.finditer(r"(\+\+|\-\-|\?\?|\=\=)", tw_text):
        code = marker_to_code[marker_match.group(1)]
        found[code] = code
    # extended mode: a lone "?" anywhere also counts as a question
    if extended_mode and "?" in tw_text:
        found["Q"] = "Q"
    return found.keys() if found else None
|
66 |
|
def parse_polemics_2(tw_text, extended_mode):
    """
    parse polemics in text and return a list of polemic code. None if not polemic found
    """
    # protocol v2 markers: ++ approval, !! disapproval, ?? question, == reference
    marker_to_code = {'++': 'OK', '!!': 'KO', '??': 'Q', '==': 'REF'}
    found = {}
    for marker_match in re.finditer(r"(\+\+|\!\!|\?\?|\=\=)", tw_text):
        code = marker_to_code[marker_match.group(1)]
        found[code] = code
    # extended mode: a lone "?" anywhere also counts as a question
    if extended_mode and "?" in tw_text:
        found["Q"] = "Q"
    return found.keys() if found else None
|
89 |
|
def parse_polemics_3(tw_text, extended_mode):
    """
    parse polemics in text and return a list of polemic code. None if not polemic found
    """
    # protocol v3 markers: ++ approval, ?? disapproval, ** reference, == question.
    # NOTE: extended_mode is accepted for signature uniformity with the other
    # protocol parsers but has no effect in this version.
    marker_to_code = {'++': 'OK', '??': 'KO', '**': 'REF', '==': 'Q'}
    found = {}
    for marker_match in re.finditer(r"(\+\+|\?\?|\*\*|\=\=)", tw_text):
        code = marker_to_code[marker_match.group(1)]
        found[code] = code
    return found.keys() if found else None
|
107 |
|
108 |
|
# Dispatch table mapping the --annotation-protocol option value (a string)
# to the polemic parser for that protocol version. Unknown versions fall
# back to parse_polemics_2 at the call site.
protocol_version_map = {
    "1" : parse_polemics_1,
    "2" : parse_polemics_2,
    "3" : parse_polemics_3
}
|
114 |
|
def get_options():
    """Build the command-line parser and parse sys.argv.

    Returns:
        (options, parser): the parsed argparse Namespace and the parser
        itself, kept so callers can print help on invalid input.
    """
    # FIX: "All date" -> "All dates" in the user-visible description.
    parser = argparse.ArgumentParser(description="All dates should be given using iso8601 format. If no timezone is used, the date is considered as UTC")

    parser.add_argument("-f", "--file", dest="filename",
                        help="write export to file", metavar="FILE", default="project.ldt")
    parser.add_argument("-d", "--chat-database", dest="database",
                        help="Input chat file", metavar="CHAT_DATABASE")
    parser.add_argument("-a", "--annotation-protocol", dest="protocol_version",
                        help="annotation protocol version", metavar="PROTOCOL_VERSION",
                        default="2")
    parser.add_argument("-s", "--start-date", dest="start_date",
                        help="start date", metavar="START_DATE", default=None)
    parser.add_argument("-e", "--end-date", dest="end_date",
                        help="end date", metavar="END_DATE", default=None)
    parser.add_argument("-I", "--content-file", dest="content_file",
                        help="Content file", metavar="CONTENT_FILE")
    parser.add_argument("-c", "--content", dest="content",
                        help="Content url", metavar="CONTENT")
    parser.add_argument("-V", "--video-url", dest="video",
                        help="video url", metavar="VIDEO")
    parser.add_argument("-i", "--content-id", dest="content_id",
                        help="Content id", metavar="CONTENT_ID")
    parser.add_argument("-x", "--exclude", dest="exclude",
                        help="file containing the id to exclude", metavar="EXCLUDE")
    parser.add_argument("-C", "--color", dest="color",
                        help="Color code", metavar="COLOR", default="16763904")
    parser.add_argument("-H", "--hashtag", dest="hashtag",
                        help="Hashtag", metavar="HASHTAG", default=[], action="append")
    parser.add_argument("-D", "--duration", dest="duration", type=int,
                        help="Duration", metavar="DURATION", default=None)
    parser.add_argument("-n", "--name", dest="name",
                        help="Cutting name", metavar="NAME", default="Chats")
    parser.add_argument("-R", "--replace", dest="replace", action="store_true",
                        help="Replace tweet ensemble", default=False)
    parser.add_argument("-m", "--merge", dest="merge", action="store_true",
                        help="merge tweet ensemble, choose the first ensemble", default=False)
    parser.add_argument("-L", "--list-conf", dest="listconf",
                        help="list of file to process", metavar="LIST_CONF", default=None)
    parser.add_argument("-E", "--extended", dest="extended_mode", action="store_true",
                        help="Trigger polemic extended mode", default=False)
    parser.add_argument("-b", "--base-url", dest="base_url",
                        help="base URL of the platform", metavar="BASE_URL", default="http://ldt.iri.centrepompidou.fr/ldtplatform/")
    parser.add_argument("-p", "--project", dest="project_id",
                        help="Project id", metavar="PROJECT_ID", default=None)
    parser.add_argument("-P", "--post-param", dest="post_param",
                        help="Post param", metavar="POST_PARAM", default=None)
    parser.add_argument("--user-whitelist", dest="user_whitelist", action="store",
                        help="A list of user screen name", metavar="USER_WHITELIST", default=None)
    # FIX: "forma" -> "format" typo in the help text.
    parser.add_argument("--cut", dest="cuts", action="append",
                        help="A cut with the format <ts in ms>::<duration>", metavar="CUT", default=[])
    parser.add_argument("-Z", "--tz", dest="timezone",
                        help="The timezone of the timestamps", metavar="TZ", default="UTC")

    # registers the shared iri_tweet logging options on this parser
    set_logging_options(parser)

    return (parser.parse_args(), parser)
|
173 |
|
174 |
|
def find_delta(deltas, ts):
    """Return the rightmost (cut_ts, delta) entry of *deltas* with cut_ts <= ts.

    *deltas* must be sorted ascending. Returns (0, 0) when no entry applies
    (including an empty list).
    """
    # (ts+1, 0) makes entries whose cut_ts equals ts sort strictly before
    # the probe, so exact matches are selected too.
    idx = bisect.bisect_right(deltas, (ts + 1, 0))
    return deltas[idx - 1] if idx else (0, 0)
|
180 |
|
181 |
|
def parse_duration(s):
    """Convert a duration string to an integer.

    A plain integer string is returned as-is; a "H:M" or "H:M:S" string is
    converted to milliseconds. Raises ValueError on any other format.
    """
    try:
        return int(s)
    except ValueError:
        pieces = s.split(":")
        if len(pieces) < 2:
            raise ValueError("Bad duration format")
        delta = datetime.timedelta(
            hours=int(pieces[0]),
            minutes=int(pieces[1]),
            seconds=int(pieces[2]) if len(pieces) > 2 else 0,
        )
        return int(round(delta.total_seconds() * 1000))
|
195 |
|
# A chat entry looks like: "HH:MM:SS\t From  <user> : <text>".
# CHAT_REGEXP captures the three fields (DOTALL lets <text> span the
# newlines introduced when continuation lines are merged); CHAT_LINE_REGEXP
# only detects whether a physical line starts a new entry.
CHAT_REGEXP = re.compile(r"^(?P<created_at>\d{2}:\d{2}:\d{2})\t\sFrom\s{2}(?P<user>.+?)\s:\s(?P<text>.*)$", re.DOTALL)
CHAT_LINE_REGEXP = re.compile(r"^\d{2}:\d{2}:\d{2}\t\sFrom\s{2}.+?\s:")

def parse_chat_line(chat_id, chat_line):
    """Parse one merged chat entry into a dict.

    Returns {} when *chat_line* does not match the expected format,
    otherwise a dict with keys 'created_at', 'user', 'text' (CR normalized
    to LF), 'id' (the caller-supplied *chat_id*) and 'tags' (the #hashtags
    found in the text).
    """
    m = CHAT_REGEXP.match(chat_line)
    if m is None:
        return {}
    res = {k: v.replace('\r', '\n') if k == 'text' else v for k, v in m.groupdict().items()}
    res['id'] = chat_id
    # FIX: raw string for the pattern — '\w' in a plain string literal is an
    # invalid escape sequence and warns on recent Python versions.
    res['tags'] = re.findall(r'#(\w+)', res['text'])
    return res
|
207 |
|
def read_chat_file(chat_file_path):
    """Read a chat transcript, merging continuation lines into entries.

    A physical line matching CHAT_LINE_REGEXP starts a new entry; any other
    line is appended (newline-joined) to the current entry. Returns the
    list of entries in file order.
    """
    entries = []
    pending = ""
    with open(chat_file_path, "r") as src:
        for raw_line in src:
            if CHAT_LINE_REGEXP.match(raw_line) is None:
                # continuation of the previous entry
                pending = pending + "\n" + raw_line
            else:
                if pending:
                    entries.append(pending)
                pending = raw_line
    # flush the last accumulated entry
    if pending:
        entries.append(pending)
    return entries
|
222 |
|
223 |
|
if __name__ == "__main__" :

    (options, parser) = get_options()

    set_logging(options)

    get_logger().debug("OPTIONS : " + repr(options)) #@UndefinedVariable

    # Cut table consumed by find_delta(): list of (timestamp_ms, delta) pairs.
    # For each cut we append a sentinel (start, -1) meaning "timestamp falls
    # inside a removed section, drop the message", then (end, total_delta)
    # giving the cumulated offset to subtract for messages after the cut.
    deltas = [(0,0)]
    total_delta = 0
    if options.cuts:
        cuts_raw = sorted([tuple([parse_duration(s) for s in c.split("::")]) for c in options.cuts])
        for c, d in cuts_raw:
            deltas.append((c+total_delta, -1))
            total_delta += d
            deltas.append((c+total_delta, total_delta))

    if len(sys.argv) == 1 or options.database is None:
        parser.print_help()
        sys.exit(1)

    user_whitelist_file = options.user_whitelist
    user_whitelist = None

    # Build the list of work items: either one item per <file> node of the
    # -L conf file, or a single item assembled from the command-line options.
    if options.listconf:

        parameters = []
        confdoc = etree.parse(options.listconf)
        for node in confdoc.xpath("/zoom_export/file"):
            params = {}
            for snode in node:
                if snode.tag == "path":
                    params['content_file'] = snode.text
                    params['content_file_write'] = snode.text
                elif snode.tag == "project_id":
                    params['content_file'] = options.base_url + LDT_PROJECT_REST_API_PATH + snode.text + "/?format=json"
                    params['content_file_write'] = options.base_url + LDT_PROJECT_REST_API_PATH + snode.text + "/?format=json"
                    params['project_id'] = snode.text
                elif snode.tag == "start_date":
                    params['start_date'] = snode.text
                elif snode.tag == "end_date":
                    params['end_date'] = snode.text
                elif snode.tag == "duration":
                    params['duration'] = int(snode.text)
                elif snode.tag == "hashtags":
                    params['hashtags'] = [snode.text]
            # hashtags given on the command line take precedence
            if options.hashtag or 'hashtags' not in params :
                params['hashtags'] = options.hashtag
            parameters.append(params)
    else:
        if options.project_id:
            content_file = options.base_url + LDT_PROJECT_REST_API_PATH + options.project_id + "/?format=json"
        else:
            content_file = options.content_file
        parameters = [{
            'start_date': options.start_date,
            'end_date' : options.end_date,
            'duration' : options.duration,
            'content_file' : content_file,
            'content_file_write' : content_file,
            'hashtags' : options.hashtag,
            'project_id' : options.project_id
        }]
    post_param = {}
    if options.post_param:
        post_param = json.loads(options.post_param)

    item_tz = dateutil.tz.UTC
    if options.timezone:
        item_tz = dateutil.tz.gettz(options.timezone)
        get_logger().debug("TIMEZONE " + options.timezone + " PARSED :: " + repr(item_tz))

    if item_tz is None:
        get_logger().error("Timezone '%s' not recognized.", options.timezone)
        print("Error: Timezone '%s' not recognized." % options.timezone)
        parser.print_help()
        sys.exit(1)

    display_content_node = None
    for params in parameters:

        get_logger().debug("PARAMETERS " + repr(params)) #@UndefinedVariable

        start_date_str = params.get("start_date",None)
        end_date_str = params.get("end_date", None)
        duration = params.get("duration", None)
        content_file = params.get("content_file", None)
        content_file_write = params.get("content_file_write", None)
        hashtags = list(set(params.get('hashtags', [])))

        if user_whitelist_file:
            with open(user_whitelist_file, 'r+') as f:
                user_whitelist = list(set([s.strip() for s in f]))

        start_date = None
        if start_date_str:
            start_date = parse_date(start_date_str, item_tz)

        root = None
        ensemble_parent = None
        project = None

        #to do : analyse situation ldt or iri ? filename set or not ?

        # Load the target document: from the platform REST API (http url),
        # from a local file, or (below) build a fresh <iri> doc from scratch.
        if content_file and content_file.find("http") == 0:

            get_logger().debug("url : " + content_file) #@UndefinedVariable

            r = requests.get(content_file, params=post_param)
            get_logger().debug("url response " + repr(r) + " content " + repr(r.text)) #@UndefinedVariable
            project = r.json()
            # strip a leading xml declaration if present — presumably to avoid
            # lxml rejecting unicode strings carrying one; confirm against lxml docs
            text_match = re.match(r"\<\?\s*xml.*?\?\>(.*)", project['ldt'], re.I|re.S)
            root = etree.fromstring(text_match.group(1) if text_match else project['ldt'])

        elif content_file and os.path.exists(content_file):

            doc = etree.parse(content_file)
            root = doc.getroot()
            for child in root:
                if child.tag == "project":
                    project = child
                    break
            if project is None:
                root = None

        content_id = None

        if root is None:
            # No usable input document: create a minimal <iri> document with
            # project/media/annotations skeleton from the command-line options.
            root = etree.Element("iri")

            project = etree.SubElement(root, "project", {"abstract":"Polemics Chat","title":"Polemic Chat", "user":"IRI Web", "id":str(uuid.uuid4())})

            medias = etree.SubElement(root, "medias")
            media = etree.SubElement(medias, "media", {"pict":"", "src":options.content, "video":options.video, "id":options.content_id, "extra":""})

            annotations = etree.SubElement(root, "annotations")
            content = etree.SubElement(annotations, "content", {"id":options.content_id})
            ensemble_parent = content

            content_id = options.content_id

        # Existing document: locate where the annotation ensembles live.
        # "ldt" project files keep them under annotations/content, plain
        # "iri" content files under body/ensembles.
        if ensemble_parent is None:
            file_type = None
            for node in root:
                if node.tag == "project":
                    file_type = "ldt"
                    break
                elif node.tag == "head":
                    file_type = "iri"
                    break

            if file_type == "ldt":
                media_nodes = root.xpath("//media")
                media = None
                if len(media_nodes) > 0:
                    media = media_nodes[0]
                annotations_node = root.find("annotations")
                if annotations_node is None:
                    annotations_node = etree.SubElement(root, "annotations")
                content_node = annotations_node.find("content")
                if content_node is None and media is not None:
                    content_node = etree.SubElement(annotations_node,"content", id=media.get("id"))
                ensemble_parent = content_node
                content_id = content_node.get("id")
                display_nodes = root.xpath("//displays/display/content[@id='%s']" % content_id)
                if len(display_nodes) == 0:
                    get_logger().info("No display node found. Will not update display")
                    display_content_node = None
                else:
                    display_content_node = display_nodes[0]

            elif file_type == "iri":
                body_node = root.find("body")
                if body_node is None:
                    body_node = etree.SubElement(root, "body")
                ensembles_node = body_node.find("ensembles")
                if ensembles_node is None:
                    ensembles_node = etree.SubElement(body_node, "ensembles")
                ensemble_parent = ensembles_node
                content_id = root.xpath("head/meta[@name='id']/@content")[0]
                display_content_node = None

        if ensemble_parent is None:
            get_logger().error("Can not process file") #@UndefinedVariable
            # FIX: exit non-zero on this error path — bare sys.exit() reports
            # success (status 0) to the shell.
            sys.exit(1)

        if options.replace:
            # Drop every previously generated chat ensemble (and its display
            # reference) before re-exporting.
            # NOTE(review): removing children while iterating iterchildren()
            # can skip siblings in lxml — confirm whether more than one
            # "chat_" ensemble per document is expected.
            for ens in ensemble_parent.iterchildren(tag="ensemble"):
                ens_id = ens.get("id","")
                if ens_id.startswith("chat_"):
                    ensemble_parent.remove(ens)
                    # remove in display nodes
                    if display_content_node is not None:
                        for cut_display in display_content_node.iterchildren():
                            if cut_display.get('idens','') == ens_id:
                                display_content_node.remove(cut_display)

        ensemble = None
        elements = None
        decoupage = None

        if options.merge:
            # reuse the first existing chat ensemble instead of creating one
            for ens in ensemble_parent.findall("ensemble"):
                if ens.get('id',"").startswith("chat_"):
                    ensemble = ens
                    break
            if ensemble is not None:
                elements = ensemble.find(".//elements")
                decoupage = ensemble.find("decoupage")

        if ensemble is None or elements is None:
            ensemble = etree.SubElement(ensemble_parent, "ensemble", {"id":"chat_" + str(uuid.uuid4()), "title":"Ensemble Chat", "author":"IRI Web", "abstract":"Ensemble Chat"})
            decoupage = etree.SubElement(ensemble, "decoupage", {"id": str(uuid.uuid4()), "author": "IRI Web"})

            etree.SubElement(decoupage, "title").text = options.name
            etree.SubElement(decoupage, "abstract").text = options.name

            elements = etree.SubElement(decoupage, "elements")

        ensemble_id = ensemble.get('id', '')
        decoupage_id = decoupage.get('id', '') if decoupage is not None else None

        # Determine the end date: explicit, start + duration, or by querying
        # the platform API for the content duration (returned in milliseconds).
        end_date = None
        if end_date_str:
            end_date = parse_date(end_date_str, item_tz)
        elif start_date and duration:
            end_date = start_date + datetime.timedelta(seconds=duration)
        elif start_date and options.base_url:
            # get duration from api
            content_url = options.base_url + LDT_CONTENT_REST_API_PATH + content_id + "/?format=json"
            r = requests.get(content_url)
            duration = int(r.json()['duration'])
            get_logger().debug("get duration " + content_url) #@UndefinedVariable
            get_logger().debug("get duration " + repr(duration)) #@UndefinedVariable

            end_date = start_date + datetime.timedelta(seconds=int(duration/1000))

        if end_date and deltas:
            end_date = end_date + datetime.timedelta(milliseconds=deltas[-1][1])

        chat_content_lines = read_chat_file(options.database.strip())
        for i,chat_line in enumerate(chat_content_lines):

            cht = parse_chat_line("%04d" % (i+1) ,chat_line.strip())
            if not cht:
                # FIX: skip entries that do not match the chat format instead
                # of crashing with a KeyError on cht['created_at'] below.
                get_logger().info("Ignoring unparsable chat line " + repr(chat_line))
                continue

            cht_ts_dt = cht['created_at']
            # FIX: `datetime` is the module here — datetime.now() raised
            # AttributeError; use datetime.datetime.now().
            default_date = start_date or datetime.datetime.now()
            cht_ts = parse_date(cht_ts_dt, item_tz, default_date.replace(tzinfo=item_tz))
            if start_date is None:
                start_date = cht_ts
            cht_ts_rel = cht_ts-start_date
            cht_ts_rel_milli = int(round(cht_ts_rel.total_seconds() * 1000))
            if deltas:
                d = find_delta(deltas, cht_ts_rel_milli)
                if d[1] < 0:
                    # timestamp falls inside a cut section: drop the message
                    continue
                else :
                    cht_ts_rel_milli -= d[1]

            username = cht['user'] or "anon."

            element = etree.SubElement(elements, "element" , {"id": "%s-%s" % (uuid.uuid4(),cht['id']), "color":options.color, "author":username, "date":cht_ts.strftime("%Y/%m/%d"), "begin": str(cht_ts_rel_milli), "dur":"0", "src":"zoom"})
            etree.SubElement(element, "title").text = username + ": " + cht['text'][:255]
            etree.SubElement(element, "abstract").text = cht['text']

            tags_node = etree.SubElement(element, "tags")

            for tag in cht['tags']:
                etree.SubElement(tags_node,"tag").text = tag

            meta_element = etree.SubElement(element, 'meta')

            etree.SubElement(meta_element, "polemic_version").text = options.protocol_version
            # unknown protocol versions fall back to the v2 parser
            parse_polemics = protocol_version_map.get(options.protocol_version, parse_polemics_2)
            polemics_list = parse_polemics(cht['text'], options.extended_mode)
            if polemics_list:
                polemics_element = etree.Element('polemics')
                for pol in polemics_list:
                    etree.SubElement(polemics_element, 'polemic').text = pol
                meta_element.append(polemics_element)

            etree.SubElement(meta_element, "source", attrib={"url":"http://zoom.io", "mimetype":"text/plain"}).text = etree.CDATA(json.dumps({'chat': chat_line}))

        # when merging into an existing ensemble, keep elements ordered by
        # their begin timecode
        if options.merge :
            elements[:] = sorted(elements,key=lambda n: int(n.get('begin')))

        # reference the decoupage from the display node if not there yet
        if display_content_node is not None:
            display_dec = None
            for dec in display_content_node.iterchildren(tag="decoupage"):
                if dec.get('idens','') == ensemble_id and dec.get('id', '') == decoupage_id:
                    display_dec = dec
                    break
            if display_dec is None and ensemble_id and decoupage_id:
                etree.SubElement(display_content_node, "decoupage", attrib={'idens': ensemble_id, 'id': decoupage_id, 'tagsSelect':''})

        output_data = etree.tostring(root, encoding="utf-8", method="xml", pretty_print=False, xml_declaration=True).decode('utf-8')

        # Write back: PUT to the platform for http destinations, file otherwise.
        if content_file_write and content_file_write.find("http") == 0:

            project["ldt"] = output_data
            # the platform serves '~' percent-encoded; un-escape before PUT
            project['owner'] = project['owner'].replace('%7E','~')
            project['contents'] = [c_url.replace('%7E','~') for c_url in project['contents']]

            post_param = {}
            if options.post_param:
                post_param = json.loads(options.post_param)

            get_logger().debug("write http " + content_file_write) #@UndefinedVariable
            get_logger().debug("write http " + repr(post_param)) #@UndefinedVariable
            get_logger().debug("write http " + repr(project)) #@UndefinedVariable
            r = requests.put(content_file_write, data=json.dumps(project), headers={'content-type':'application/json'}, params=post_param)
            get_logger().debug("write http " + repr(r) + " content " + r.text) #@UndefinedVariable
            if r.status_code != requests.codes.ok: # pylint: disable=E1101
                r.raise_for_status()
        else:
            if content_file_write and os.path.exists(content_file_write):
                dest_file_name = content_file_write
            else:
                dest_file_name = options.filename

            get_logger().debug("WRITE : " + dest_file_name) #@UndefinedVariable
            # FIX: context manager so the file is closed even on write errors
            with open(dest_file_name, "w") as output:
                output.write(output_data)
                output.flush()