# HG changeset patch
# User ymh
# Date 1413371387 -7200
# Node ID eb1f7b06001f81df54bdaf227ccb7ac43c8a3f44
# Parent  16a1925df2df969c666a8d25323b8ec43875f883
add api + first version (not tested) of export annotation script

diff -r 16a1925df2df -r eb1f7b06001f annot-server/webapp/__init__.py
--- a/annot-server/webapp/__init__.py	Tue Oct 14 08:12:54 2014 +0200
+++ b/annot-server/webapp/__init__.py	Wed Oct 15 13:09:47 2014 +0200
@@ -17,3 +17,4 @@
 app.config.from_object(config)
 
 import webapp.views
+import webapp.api
diff -r 16a1925df2df -r eb1f7b06001f annot-server/webapp/api.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/annot-server/webapp/api.py	Wed Oct 15 13:09:47 2014 +0200
@@ -0,0 +1,18 @@
+#
+# See LICENCE for detail
+# Copyright (c) 2014 IRI
+#
+
+import flask.ext.restless
+
+import database
+import models
+from webapp import app
+
+manager = flask.ext.restless.APIManager(app, session=database.db_session)
+
+manager.create_api(models.Annotation,
+                   methods=['GET', 'POST', 'PUT', 'DELETE'],
+                   url_prefix='/api/v1',
+                   primary_key='uuid',
+                   max_results_per_page=-1)
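
For reference, the API created above can be exercised directly. Flask-Restless derives the collection name from the model's table, so the exact endpoint depends on `models.Annotation`; the host, port and collection name in this sketch are assumptions:

```python
# Illustrative only: assumes the app runs on localhost:5000 and that the
# Annotation model maps to a table named "annotation".
import json
import requests

q = {'filters': [{'name': 'channel', 'op': 'in', 'val': ['ANNOT']}]}
resp = requests.get('http://localhost:5000/api/v1/annotation',
                    params={'q': json.dumps(q), 'results_per_page': 100},
                    headers={'Content-Type': 'application/json'})
# Flask-Restless wraps paginated results in an envelope:
# {'page': ..., 'total_pages': ..., 'num_results': ..., 'objects': [...]}
for annot in resp.json().get('objects', []):
    print(annot['uuid'])
```

The `max_results_per_page=-1` setting above is presumably meant to lift the server-side page cap so that the export script below can request large pages.
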
diff -r 16a1925df2df -r eb1f7b06001f requirements.txt
--- a/requirements.txt	Tue Oct 14 08:12:54 2014 +0200
+++ b/requirements.txt	Wed Oct 15 13:09:47 2014 +0200
@@ -1,5 +1,5 @@
 Flask==0.10.1
-Flask-SQLAlchemy==2.0
+Flask-Restless==0.14.2
 Jinja2==2.7.3
 MarkupSafe==0.23
 SQLAlchemy==0.9.8
@@ -8,8 +8,12 @@
 autobahn==0.9.1
 itsdangerous==0.24
 midi==v0.2.3
+mimerender==0.5.4
 ntplib==0.3.2
 psycopg2==2.5.4
+python-dateutil==2.2
+python-mimeparse==0.1.4
+pytz==2014.7
 six==1.8.0
 txosc==0.2.0
 txpostgres==1.2.0
diff -r 16a1925df2df -r eb1f7b06001f utils/export_annotations.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/utils/export_annotations.py	Wed Oct 15 13:09:47 2014 +0200
@@ -0,0 +1,491 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+from lxml import etree
+from iri_tweet.utils import set_logging_options, set_logging, get_logger
+import argparse
+import json
+import datetime
+import requests
+import os.path
+import re
+import sys
+import time
+import uuid #@UnresolvedImport
+from dateutil.parser import parse as parse_date
+import bisect
+
+#class TweetExclude(object):
+#    def __init__(self, id):
+#        self.id = id
+#
+#    def __repr__(self):
+#        return "" % (self.id)
+
+LDT_CONTENT_REST_API_PATH = "api/ldt/1.0/contents/"
+LDT_PROJECT_REST_API_PATH = "api/ldt/1.0/projects/"
+DEFAULT_ANNOTATION_CHANNEL = 'ANNOT'
+
+
+def get_filter(start_date, end_date, events, channels, user_whitelist):
+    # build the Flask-Restless 'filters' list for the annotation query
+    res = []
+    #TODO: check timezone...
+    if start_date:
+        res.append({'name': 'ts', 'op': ">=", 'val': start_date.isoformat()})
+    if end_date:
+        res.append({'name': 'ts', 'op': "<=", 'val': end_date.isoformat()})
+    if events:
+        res.append({'name': 'event', 'op': "in", 'val': events})
+    if channels:
+        res.append({'name': 'channel', 'op': "in", 'val': channels})
+    if user_whitelist:
+        res.append({'name': 'user', 'op': "in", 'val': user_whitelist})
+    return res
+
+# def parse_polemics(tw, extended_mode):
+#     """
+#     parse polemics in text and return a list of polemic codes. None if no polemic found
+#     """
+#     polemics = {}
+#     for m in re.finditer("(\+\+|\-\-|\?\?|\=\=)", tw.text):
+#         pol_link = {
+#             '++': u'OK',
+#             '--': u'KO',
+#             '??': u'Q',
+#             '==': u'REF'}[m.group(1)]
+#         polemics[pol_link] = pol_link
+#
+#     if extended_mode:
+#         if "?" in tw.text:
+#             polemics["Q"] = "Q"
+#
+#         for entity in tw.entity_list:
+#             if entity.type == "entity_url":
+#                 polemics["REF"] = "REF"
+#
+#     if len(polemics) > 0:
+#         return polemics.keys()
+#     else:
+#         return None
+
+def get_options():
+
+    usage = "usage: %(prog)s [options]"
+
+    parser = argparse.ArgumentParser(usage=usage)
+
+    parser.add_argument("-f", "--file", dest="filename",
+                        help="write export to file", metavar="FILE", default="project.ldt")
+    parser.add_argument("-a", "--annot-url", dest="annot_url",
+                        help="annotation server url", metavar="ANNOT-URL", required=True)
+    parser.add_argument("-s", "--start-date", dest="start_date",
+                        help="start date", metavar="START_DATE", default=None)
+    parser.add_argument("-e", "--end-date", dest="end_date",
+                        help="end date", metavar="END_DATE", default=None)
+    parser.add_argument("-I", "--content-file", dest="content_file",
+                        help="Content file", metavar="CONTENT_FILE")
+    parser.add_argument("-c", "--content", dest="content",
+                        help="Content url", metavar="CONTENT")
+    parser.add_argument("-V", "--video-url", dest="video",
+                        help="video url", metavar="VIDEO")
+    parser.add_argument("-i", "--content-id", dest="content_id",
+                        help="Content id", metavar="CONTENT_ID")
+    parser.add_argument("-x", "--exclude", dest="exclude",
+                        help="file containing the ids to exclude", metavar="EXCLUDE")
+    parser.add_argument("-C", "--color", dest="color",
+                        help="Color code", metavar="COLOR", default="16763904")
+    parser.add_argument("-H", "--channel", dest="channels",
+                        help="Channel", metavar="CHANNEL", default=[DEFAULT_ANNOTATION_CHANNEL], action="append")
+    parser.add_argument("-E", "--event", dest="events",
+                        help="Event", metavar="EVENT", default=[], action="append")
+    parser.add_argument("-D", "--duration", dest="duration", type=int,
+                        help="Duration (seconds)", metavar="DURATION", default=None)
+    parser.add_argument("-n", "--name", dest="name",
+                        help="Cutting name", metavar="NAME", default=u"annotations")
+    parser.add_argument("-R", "--replace", dest="replace", action="store_true",
+                        help="Replace annotation ensemble", default=False)
+    parser.add_argument("-m", "--merge", dest="merge", action="store_true",
+                        help="merge annotation ensemble, choose the first ensemble", default=False)
+    parser.add_argument("-L", "--list-conf", dest="listconf",
+                        help="list of files to process", metavar="LIST_CONF", default=None)
+    parser.add_argument("-b", "--base-url", dest="base_url",
+                        help="base URL of the platform", metavar="BASE_URL", default="http://ldt.iri.centrepompidou.fr/ldtplatform/")
+    parser.add_argument("-p", "--project", dest="project_id",
+                        help="Project id", metavar="PROJECT_ID", default=None)
+    parser.add_argument("-P", "--post-param", dest="post_param",
+                        help="Post param", metavar="POST_PARAM", default=None)
+    parser.add_argument("--user-whitelist", dest="user_whitelist", action="store",
+                        help="file containing a list of user screen names", metavar="USER_WHITELIST", default=None)
+    parser.add_argument("--cut", dest="cuts", action="append",
+                        help="A cut with the format <start>::<duration>", metavar="CUT", default=[])
+    parser.add_argument("--batch-size", dest="batch_size", type=int,
+                        help="number of annotations requested per API page", metavar="BATCH_SIZE", default=100)
+
+    set_logging_options(parser)
+
+    return (parser.parse_args(), parser)
+
+
+def find_delta(deltas, ts):
+    # deltas is a sorted list of (timestamp, cumulated offset) pairs;
+    # return the pair in effect at ts
+    i = bisect.bisect_right(deltas, (ts+1, 0))
+    if i:
+        return deltas[i-1]
+    return (0, 0)
+
+
+def parse_duration(s):
+    # accept either plain milliseconds or a hh:mm[:ss] timecode
+    try:
+        return int(s)
+    except ValueError:
+        parts = s.split(":")
+        if len(parts) < 2:
+            raise ValueError("Bad duration format")
+        time_params = {
+            'hours': int(parts[0]),
+            'minutes': int(parts[1]),
+            'seconds': int(parts[2]) if len(parts) > 2 else 0
+        }
+        return int(datetime.timedelta(**time_params).total_seconds()*1000)
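
`parse_duration` and `find_delta` above implement the `--cut` feature: each `<start>::<duration>` cut becomes two entries in a sorted (timestamp, cumulated offset) table, built in the `__main__` block further down, which remaps annotation timecodes onto the cut video. A self-contained sketch with invented values:

```python
# One 5s cut starting at 60s into the video (values are made up).
import bisect

def find_delta(deltas, ts):
    i = bisect.bisect_right(deltas, (ts + 1, 0))
    if i:
        return deltas[i - 1]
    return (0, 0)

deltas = [(0, 0)]
total_delta = 0
for cut_start, cut_len in [(60000, 5000)]:        # from --cut 60000::5000
    deltas.append((cut_start + total_delta, -1))  # offset -1 marks the cut
    total_delta += cut_len
    deltas.append((cut_start + total_delta, total_delta))

print(find_delta(deltas, 30000))  # (0, 0): before the cut, unchanged
print(find_delta(deltas, 62000))  # (60000, -1): inside the cut, dropped
print(find_delta(deltas, 70000))  # (65000, 5000): after it, shifted back 5s
```
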
+
+def build_annotation_iterator(url, params, headers):
+    # page through the Flask-Restless collection until total_pages is reached
+    page = 0
+    page_nb = 1
+    while page < page_nb:
+        page += 1
+        params['page'] = page
+        resp = requests.get(url, params=params, headers=headers)
+        if resp.status_code != 200:
+            return
+        resp_json = resp.json()
+        page_nb = resp_json.get('total_pages', 1)
+        # Flask-Restless returns the items of a page under 'objects'
+        for item in resp_json.get('objects', []):
+            #TODO: add progress log
+            yield item
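
A usage sketch for this pager, mirroring the query assembled in the `__main__` block below; the URL is hypothetical and assumes the server from `annot-server/` is running:

```python
# Hypothetical endpoint; see the envelope format noted after the api.py diff.
import json

annot_url = "http://localhost:5000/api/v1/annotation"
headers = {'Content-Type': 'application/json'}
query_params = {
    'q': json.dumps({'filters': [
        {'name': 'channel', 'op': 'in', 'val': ['ANNOT']}]}),
    'results_per_page': 100,
}
for annot in build_annotation_iterator(annot_url, query_params, headers):
    print(annot.get('uuid'))
```
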
+
+
+if __name__ == "__main__":
+
+    (options, parser) = get_options()
+
+    set_logging(options)
+
+    get_logger().debug("OPTIONS : " + repr(options)) #@UndefinedVariable
+
+    # each cut <start>::<duration> contributes two entries to the
+    # (timestamp, cumulated offset) table consumed by find_delta()
+    deltas = [(0, 0)]
+    total_delta = 0
+    if options.cuts:
+        cuts_raw = sorted([tuple([parse_duration(s) for s in c.split("::")]) for c in options.cuts])
+        for c, d in cuts_raw:
+            deltas.append((c+total_delta, -1))
+            total_delta += d
+            deltas.append((c+total_delta, total_delta))
+
+    if len(sys.argv) == 1 or options.annot_url is None:
+        parser.print_help()
+        sys.exit(1)
+
+    user_whitelist_file = options.user_whitelist
+    user_whitelist = None
+
+    annotation_url = options.annot_url
+
+    if options.listconf:
+
+        parameters = []
+        confdoc = etree.parse(options.listconf)
+        for node in confdoc.xpath("/twitter_export/file"):
+            params = {}
+            for snode in node:
+                if snode.tag == "path":
+                    params['content_file'] = snode.text
+                    params['content_file_write'] = snode.text
+                elif snode.tag == "project_id":
+                    params['content_file'] = options.base_url + LDT_PROJECT_REST_API_PATH + snode.text + "/?format=json"
+                    params['content_file_write'] = options.base_url + LDT_PROJECT_REST_API_PATH + snode.text + "/?format=json"
+                    params['project_id'] = snode.text
+                elif snode.tag == "start_date":
+                    params['start_date'] = snode.text
+                elif snode.tag == "end_date":
+                    params['end_date'] = snode.text
+                elif snode.tag == "duration":
+                    params['duration'] = int(snode.text)
+                elif snode.tag == "events":
+                    params['events'] = [snode.text]
+                elif snode.tag == "channels":
+                    params['channels'] = [snode.text]
+            if options.events or 'events' not in params:
+                params['events'] = options.events
+            if options.channels or 'channels' not in params:
+                params['channels'] = options.channels
+
+            parameters.append(params)
+    else:
+        if options.project_id:
+            content_file = options.base_url + LDT_PROJECT_REST_API_PATH + options.project_id + "/?format=json"
+        else:
+            content_file = options.content_file
+        parameters = [{
+            'start_date': options.start_date,
+            'end_date': options.end_date,
+            'duration': options.duration,
+            'events': options.events,
+            'channels': options.channels,
+            'content_file': content_file,
+            'content_file_write': content_file,
+            'project_id': options.project_id
+        }]
+
+    post_param = {}
+    if options.post_param:
+        post_param = json.loads(options.post_param)
+
+    for params in parameters:
+
+        get_logger().debug("PARAMETERS " + repr(params)) #@UndefinedVariable
+
+        start_date_str = params.get("start_date", None)
+        end_date_str = params.get("end_date", None)
+        duration = params.get("duration", None)
+        content_file = params.get("content_file", None)
+        content_file_write = params.get("content_file_write", None)
+        channels = params.get('channels', [DEFAULT_ANNOTATION_CHANNEL])
+        events = params.get('events', [])
+
+        if user_whitelist_file:
+            with open(user_whitelist_file, 'r') as f:
+                user_whitelist = list(set([s.strip() for s in f]))
+
+        start_date = None
+        ts = None
+        if start_date_str:
+            start_date = parse_date(start_date_str)
+            ts = time.mktime(start_date.timetuple())
+
+        root = None
+        ensemble_parent = None
+        display_content_node = None
+
+        # TODO: analyse situation: ldt or iri? filename set or not?
+
+        if content_file and content_file.find("http") == 0:
+
+            get_logger().debug("url : " + content_file) #@UndefinedVariable
+
+            r = requests.get(content_file, params=post_param)
+            get_logger().debug("url response " + repr(r) + " content " + repr(r.text)) #@UndefinedVariable
+            project = r.json()
+            text_match = re.match(r"\<\?\s*xml.*?\?\>(.*)", project['ldt'], re.I|re.S)
+            root = etree.fromstring(text_match.group(1) if text_match else project['ldt'])
+
+        elif content_file and os.path.exists(content_file):
+
+            doc = etree.parse(content_file)
+            root = doc.getroot()
+
+        content_id = None
+
+        if root is None:
+
+            root = etree.Element(u"iri")
+
+            project = etree.SubElement(root, u"project", {u"abstract": u"Polemics Tweets", u"title": u"Polemic Tweets", u"user": u"IRI Web", u"id": unicode(uuid.uuid4())})
+
+            medias = etree.SubElement(root, u"medias")
+            media = etree.SubElement(medias, u"media", {u"pict": u"", u"src": unicode(options.content), u"video": unicode(options.video), u"id": unicode(options.content_id), u"extra": u""})
+
+            annotations = etree.SubElement(root, u"annotations")
+            content = etree.SubElement(annotations, u"content", {u"id": unicode(options.content_id)})
+            ensemble_parent = content
+
+            content_id = options.content_id
+
+        if ensemble_parent is None:
+            file_type = None
+            for node in root:
+                if node.tag == "project":
+                    file_type = "ldt"
+                    break
+                elif node.tag == "head":
+                    file_type = "iri"
+                    break
+
+            if file_type == "ldt":
+                media_nodes = root.xpath("//media")
+                if len(media_nodes) > 0:
+                    media = media_nodes[0]
+                annotations_node = root.find(u"annotations")
+                if annotations_node is None:
+                    annotations_node = etree.SubElement(root, u"annotations")
+                content_node = annotations_node.find(u"content")
+                if content_node is None:
+                    content_node = etree.SubElement(annotations_node, u"content", id=media.get(u"id"))
+                ensemble_parent = content_node
+                content_id = content_node.get(u"id")
+                display_nodes = root.xpath("//displays/display/content[@id='%s']" % content_id)
+                if len(display_nodes) == 0:
+                    get_logger().info("No display node found. Will not update display")
+                    display_content_node = None
+                else:
+                    display_content_node = display_nodes[0]
+
+            elif file_type == "iri":
+                body_node = root.find(u"body")
+                if body_node is None:
+                    body_node = etree.SubElement(root, u"body")
+                ensembles_node = body_node.find(u"ensembles")
+                if ensembles_node is None:
+                    ensembles_node = etree.SubElement(body_node, u"ensembles")
+                ensemble_parent = ensembles_node
+                content_id = root.xpath("head/meta[@name='id']/@content")[0]
+                display_content_node = None
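
For orientation, here is a minimal document in the "ldt" layout that the branch above accepts. The structure is inferred from the lookups in the code, not from an official schema, so real ldtplatform projects will carry more attributes:

```python
# Inferred skeleton; just enough for the "ldt" branch above to find its nodes.
from lxml import etree

LDT_SKELETON = """
<iri>
  <project id="p1" title="Polemic Tweets" user="IRI Web" abstract=""/>
  <medias>
    <media id="m1" src="http://example.org/content" video="http://example.org/video.mp4" pict="" extra=""/>
  </medias>
  <annotations>
    <content id="m1"/>
  </annotations>
  <displays>
    <display>
      <content id="m1"/>
    </display>
  </displays>
</iri>
"""

root = etree.fromstring(LDT_SKELETON)
assert root[0].tag == "project"                        # detected as "ldt"
assert root.xpath("//displays/display/content[@id='m1']")
```
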
+
+        if ensemble_parent is None:
+            get_logger().error("Cannot process file") #@UndefinedVariable
+            sys.exit(1)
+
+        if options.replace:
+            for ens in ensemble_parent.iterchildren(tag=u"ensemble"):
+                ens_id = ens.get("id", "")
+                if ens_id.startswith("annot_"):
+                    ensemble_parent.remove(ens)
+                    # remove in display nodes
+                    if display_content_node is not None:
+                        for cut_display in display_content_node.iterchildren():
+                            if cut_display.get('idens', '') == ens_id:
+                                display_content_node.remove(cut_display)
+
+        ensemble = None
+        elements = None
+        decoupage = None
+
+        if options.merge:
+            for ens in ensemble_parent.findall(u"ensemble"):
+                if ens.get('id', "").startswith("annot_"):
+                    ensemble = ens
+                    break
+            if ensemble is not None:
+                elements = ensemble.find(u".//elements")
+                decoupage = ensemble.find(u"decoupage")
+
+        if ensemble is None or elements is None:
+            ensemble = etree.SubElement(ensemble_parent, u"ensemble", {u"id": u"annot_" + unicode(uuid.uuid4()), u"title": u"Ensemble Twitter", u"author": u"IRI Web", u"abstract": u"Ensemble Twitter"})
+            decoupage = etree.SubElement(ensemble, u"decoupage", {u"id": unicode(uuid.uuid4()), u"author": u"IRI Web"})
+
+            etree.SubElement(decoupage, u"title").text = unicode(options.name)
+            etree.SubElement(decoupage, u"abstract").text = unicode(options.name)
+
+            elements = etree.SubElement(decoupage, u"elements")
+
+        ensemble_id = ensemble.get('id', '')
+        decoupage_id = decoupage.get('id', '') if decoupage is not None else None
+
+        end_date = None
+        if end_date_str:
+            end_date = parse_date(end_date_str)
+        elif start_date and duration:
+            end_date = start_date + datetime.timedelta(seconds=duration)
+        elif start_date and options.base_url:
+            # get duration from the platform api (value is in milliseconds)
+            content_url = options.base_url + LDT_CONTENT_REST_API_PATH + content_id + "/?format=json"
+            r = requests.get(content_url)
+            duration = int(r.json()['duration'])
+            get_logger().debug("get duration " + content_url) #@UndefinedVariable
+            get_logger().debug("get duration " + repr(duration)) #@UndefinedVariable
+
+            end_date = start_date + datetime.timedelta(seconds=int(duration/1000))
+
+        if end_date and deltas:
+            end_date = end_date + datetime.timedelta(milliseconds=deltas[-1][1])
+
+        filters = get_filter(start_date, end_date, events, channels, user_whitelist)
+
+        headers = {'Content-Type': 'application/json'}
+
+        query_params = {'q': json.dumps({'filters': filters}), 'results_per_page': options.batch_size}
+
+        for annot in build_annotation_iterator(annotation_url, query_params, headers):
+            #TODO : check timezone !!!
+            annot_ts_dt = parse_date(annot['ts'])
+            annot_ts = int(time.mktime(annot_ts_dt.timetuple()))
+            if ts is None:
+                ts = annot_ts
+            annot_ts_rel = (annot_ts - ts) * 1000
+            if deltas:
+                d = find_delta(deltas, annot_ts_rel)
+                if d[1] < 0:
+                    # annotation falls inside a cut: skip it
+                    continue
+                else:
+                    annot_ts_rel -= d[1]
+            annot_content = annot.get('content', {'category': None, 'user': None})
+
+            username = annot_content.get('user', 'anon.') or 'anon.'
+
+            category = annot_content.get('category', None)
+            if category is None:
+                continue
+
+            element = etree.SubElement(elements, u"element", {u"id": unicode(annot.get('uuid', uuid.uuid4())), u"color": unicode(options.color), u"author": unicode(username), u"date": unicode(annot_ts_dt.strftime("%Y/%m/%d")), u"begin": unicode(annot_ts_rel), u"dur": u"0"})
+            etree.SubElement(element, u"title").text = unicode(username) + u": " + unicode(category.get('label', category.get('code', '')))
+            etree.SubElement(element, u"abstract").text = unicode(category.get('label', category.get('code', '')))
+
+            tags_node = etree.SubElement(element, u"tags")
+            etree.SubElement(tags_node, u"tag").text = category.get('code', '')
+
+            meta_element = etree.SubElement(element, u'meta')
+
+            polemics_element = etree.Element(u'polemics')
+            etree.SubElement(polemics_element, u'polemic').text = category.get('code', '')
+            meta_element.append(polemics_element)
+
+            etree.SubElement(meta_element, u"source", attrib={"url": annotation_url + "/" + annot['uuid'], "mimetype": u"application/json"}).text = etree.CDATA(json.dumps(annot))
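
The loop above only relies on a few fields of each annotation object. Their shape, reconstructed from those accesses (all values here are invented):

```python
# Assumed annotation payload; field names come from the code above,
# the values are illustrative.
sample_annot = {
    'uuid': '1c9b5f6e-0000-0000-0000-000000000000',
    'ts': '2014-10-15T13:09:47+02:00',  # parsed with dateutil
    'channel': 'ANNOT',
    'event': 'event-slug',
    'content': {
        'user': 'some_user',            # falls back to 'anon.'
        'category': {                   # annotations without a category
            'code': 'OK',               # are skipped
            'label': 'Approval',
        },
    },
}
```
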
+
+        # sort by tc
+        if options.merge:
+            # take all elements out, sort them by tc and put them back
+            elements[:] = sorted(elements, key=lambda n: int(n.get('begin')))
+
+        # add to display node
+        if display_content_node is not None:
+            display_dec = None
+            for dec in display_content_node.iterchildren(tag=u"decoupage"):
+                if dec.get('idens', '') == ensemble_id and dec.get('id', '') == decoupage_id:
+                    display_dec = dec
+                    break
+            if display_dec is None and ensemble_id and decoupage_id:
+                etree.SubElement(display_content_node, u"decoupage", attrib={'idens': ensemble_id, 'id': decoupage_id, 'tagsSelect': ''})
+
+        output_data = etree.tostring(root, encoding="utf-8", method="xml", pretty_print=False, xml_declaration=True)
+
+        if content_file_write and content_file_write.find("http") == 0:
+
+            project["ldt"] = output_data
+            project['owner'] = project['owner'].replace('%7E', '~')
+            project['contents'] = [c_url.replace('%7E', '~') for c_url in project['contents']]
+
+            post_param = {}
+            if options.post_param:
+                post_param = json.loads(options.post_param)
+
+            get_logger().debug("write http " + content_file_write) #@UndefinedVariable
+            get_logger().debug("write http " + repr(post_param)) #@UndefinedVariable
+            get_logger().debug("write http " + repr(project)) #@UndefinedVariable
+            r = requests.put(content_file_write, data=json.dumps(project), headers={'content-type': 'application/json'}, params=post_param)
+            get_logger().debug("write http " + repr(r) + " content " + r.text) #@UndefinedVariable
+            if r.status_code != requests.codes.ok: # @UndefinedVariable
+                r.raise_for_status()
+        else:
+            if content_file_write and os.path.exists(content_file_write):
+                dest_file_name = content_file_write
+            else:
+                dest_file_name = options.filename
+
+            get_logger().debug("WRITE : " + dest_file_name) #@UndefinedVariable
+            with open(dest_file_name, "w") as output:
+                output.write(output_data)
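
A note on the `elements[:] = sorted(...)` idiom used in the merge path above: lxml elements behave as sequences of their children, and slice assignment moves the children in place, so this re-orders the merged annotations by their `begin` timecode without rebuilding the tree. A standalone illustration:

```python
from lxml import etree

parent = etree.fromstring(
    '<elements>'
    '<element id="b" begin="2000"/>'
    '<element id="a" begin="1000"/>'
    '</elements>')
parent[:] = sorted(parent, key=lambda n: int(n.get('begin')))
print([e.get('id') for e in parent])  # ['a', 'b']
```
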