--- a/script/utils/export_twitter_alchemy.py Tue Oct 01 16:51:32 2013 +0200
+++ b/script/utils/export_twitter_alchemy.py Mon Oct 21 12:38:23 2013 +0200
@@ -3,11 +3,11 @@
from lxml import etree
from iri_tweet.models import setup_database, Tweet, User
-from optparse import OptionParser #@UnresolvedImport
from sqlalchemy import Table, Column, BigInteger, event, bindparam
from sqlalchemy.sql import select, func
from iri_tweet.utils import (set_logging_options, set_logging, get_filter_query,
get_logger)
+import argparse
import anyjson
import datetime
import requests
@@ -62,60 +62,64 @@
return None
def get_options():
- parser = OptionParser()
- parser.add_option("-f", "--file", dest="filename",
+
+ usage = "usage: %(prog)s [options]"
+
+    parser = argparse.ArgumentParser(usage=usage)
+
+ parser.add_argument("-f", "--file", dest="filename",
help="write export to file", metavar="FILE", default="project.ldt")
- parser.add_option("-d", "--database", dest="database",
+ parser.add_argument("-d", "--database", dest="database",
help="Input database", metavar="DATABASE")
- parser.add_option("-s", "--start-date", dest="start_date",
+ parser.add_argument("-s", "--start-date", dest="start_date",
help="start date", metavar="START_DATE", default=None)
- parser.add_option("-e", "--end-date", dest="end_date",
+ parser.add_argument("-e", "--end-date", dest="end_date",
help="end date", metavar="END_DATE", default=None)
- parser.add_option("-I", "--content-file", dest="content_file",
+ parser.add_argument("-I", "--content-file", dest="content_file",
help="Content file", metavar="CONTENT_FILE")
- parser.add_option("-c", "--content", dest="content",
+ parser.add_argument("-c", "--content", dest="content",
help="Content url", metavar="CONTENT")
- parser.add_option("-V", "--video-url", dest="video",
+ parser.add_argument("-V", "--video-url", dest="video",
help="video url", metavar="VIDEO")
- parser.add_option("-i", "--content-id", dest="content_id",
+ parser.add_argument("-i", "--content-id", dest="content_id",
help="Content id", metavar="CONTENT_ID")
- parser.add_option("-x", "--exclude", dest="exclude",
+ parser.add_argument("-x", "--exclude", dest="exclude",
help="file containing the id to exclude", metavar="EXCLUDE")
- parser.add_option("-C", "--color", dest="color",
+ parser.add_argument("-C", "--color", dest="color",
help="Color code", metavar="COLOR", default="16763904")
- parser.add_option("-H", "--hashtag", dest="hashtag",
+ parser.add_argument("-H", "--hashtag", dest="hashtag",
help="Hashtag", metavar="HASHTAG", default=[], action="append")
- parser.add_option("-D", "--duration", dest="duration", type="int",
+ parser.add_argument("-D", "--duration", dest="duration", type=int,
help="Duration", metavar="DURATION", default=None)
- parser.add_option("-n", "--name", dest="name",
+ parser.add_argument("-n", "--name", dest="name",
help="Cutting name", metavar="NAME", default=u"Tweets")
- parser.add_option("-R", "--replace", dest="replace", action="store_true",
- help="Replace tweet ensemble", metavar="REPLACE", default=False)
- parser.add_option("-m", "--merge", dest="merge", action="store_true",
- help="merge tweet ensemble, choose the first ensemble", metavar="MERGE", default=False)
- parser.add_option("-L", "--list-conf", dest="listconf",
+ parser.add_argument("-R", "--replace", dest="replace", action="store_true",
+ help="Replace tweet ensemble", default=False)
+ parser.add_argument("-m", "--merge", dest="merge", action="store_true",
+ help="merge tweet ensemble, choose the first ensemble", default=False)
+ parser.add_argument("-L", "--list-conf", dest="listconf",
help="list of file to process", metavar="LIST_CONF", default=None)
- parser.add_option("-E", "--extended", dest="extended_mode", action="store_true",
- help="Trigger polemic extended mode", metavar="EXTENDED", default=False)
- parser.add_option("-b", "--base-url", dest="base_url",
+ parser.add_argument("-E", "--extended", dest="extended_mode", action="store_true",
+ help="Trigger polemic extended mode", default=False)
+ parser.add_argument("-b", "--base-url", dest="base_url",
help="base URL of the platform", metavar="BASE_URL", default="http://ldt.iri.centrepompidou.fr/ldtplatform/")
- parser.add_option("-p", "--project", dest="project_id",
+ parser.add_argument("-p", "--project", dest="project_id",
help="Project id", metavar="PROJECT_ID", default=None)
- parser.add_option("-P", "--post-param", dest="post_param",
+ parser.add_argument("-P", "--post-param", dest="post_param",
help="Post param", metavar="POST_PARAM", default=None)
- parser.add_option("--user-whitelist", dest="user_whitelist", action="store",
+ parser.add_argument("--user-whitelist", dest="user_whitelist", action="store",
help="A list of user screen name", metavar="USER_WHITELIST",default=None)
set_logging_options(parser)
- return parser.parse_args() + (parser,)
+ return (parser.parse_args(), parser)
if __name__ == "__main__" :
- (options, args, parser) = get_options()
+ (options, parser) = get_options()
set_logging(options)
@@ -250,7 +254,7 @@
get_logger().debug("url : " + content_file) #@UndefinedVariable
r = requests.get(content_file, params=post_param)
- #get_logger().debug("url response " + repr(r) + " content " + repr(r.text)) #@UndefinedVariable
+ get_logger().debug("url response " + repr(r) + " content " + repr(r.text)) #@UndefinedVariable
project = r.json()
text_match = re.match(r"\<\?\s*xml.*?\?\>(.*)", project['ldt'], re.I|re.S)
root = etree.fromstring(text_match.group(1) if text_match else project['ldt'])
@@ -300,6 +304,13 @@
content_node = etree.SubElement(annotations_node,u"content", id=media.get(u"id"))
ensemble_parent = content_node
content_id = content_node.get(u"id")
+ display_nodes = root.xpath("//displays/display/content[@id='%s']" % content_id)
+ if len(display_nodes) == 0:
+ get_logger().info("No display node found. Will not update display")
+ display_content_node = None
+ else:
+ display_content_node = display_nodes[0]
+
elif file_type == "iri":
body_node = root.find(u"body")
if body_node is None:
@@ -309,6 +320,7 @@
ensembles_node = etree.SubElement(body_node, u"ensembles")
ensemble_parent = ensembles_node
content_id = root.xpath("head/meta[@name='id']/@content")[0]
+ display_content_node = None
if ensemble_parent is None:
@@ -317,16 +329,26 @@
if options.replace:
for ens in ensemble_parent.iterchildren(tag=u"ensemble"):
- if ens.get("id","").startswith("tweet_"):
+ ens_id = ens.get("id","")
+ if ens_id.startswith("tweet_"):
ensemble_parent.remove(ens)
+ # remove in display nodes
+ if display_content_node is not None:
+ for cut_display in display_content_node.iterchildren():
+ if cut_display.get('idens','') == ens_id:
+ display_content_node.remove(cut_display)
ensemble = None
elements = None
if options.merge:
- ensemble = ensemble_parent.find(u"ensemble")
- if ensemble is not None:
- elements = ensemble.find(u".//elements")
+ for ens in ensemble_parent.findall(u"ensemble"):
+ if ens.get('id',"").startswith("tweet_"):
+ ensemble = ens
+ break
+ if ensemble is not None:
+ elements = ensemble.find(u".//elements")
+ decoupage = ensemble.find(u"decoupage")
if ensemble is None or elements is None:
ensemble = etree.SubElement(ensemble_parent, u"ensemble", {u"id":u"tweet_" + unicode(uuid.uuid4()), u"title":u"Ensemble Twitter", u"author":u"IRI Web", u"abstract":u"Ensemble Twitter"})
@@ -337,6 +359,9 @@
elements = etree.SubElement(decoupage, u"elements")
+ ensemble_id = ensemble.get('id', '')
+ decoupage_id = decoupage.get('id', '') if decoupage is not None else None
+
end_date = None
if end_date_str:
end_date = parse_date(end_date_str)
@@ -399,14 +424,24 @@
#put them back
elements[:] = sorted(elements,key=lambda n: int(n.get('begin')))
-
-
+ #add to display node
+ if display_content_node is not None:
+ display_dec = None
+ for dec in display_content_node.iterchildren(tag=u"decoupage"):
+ if dec.get('idens','') == ensemble_id and dec.get('id', '') == decoupage_id:
+ display_dec = dec
+ break
+ if display_dec is None and ensemble_id and decoupage_id:
+ etree.SubElement(display_content_node, u"decoupage", attrib={'idens': ensemble_id, 'id': decoupage_id, 'tagsSelect':''})
output_data = etree.tostring(root, encoding="utf-8", method="xml", pretty_print=False, xml_declaration=True)
if content_file_write and content_file_write.find("http") == 0:
project["ldt"] = output_data
+ project['owner'] = project['owner'].replace('%7E','~')
+ project['contents'] = [c_url.replace('%7E','~') for c_url in project['contents']]
+
post_param = {}
if options.post_param:
post_param = anyjson.loads(options.post_param)