--- a/server/ammico/serializers.py Thu Jun 04 20:03:04 2015 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,148 +0,0 @@
-from html.parser import HTMLParser
-import json
-
-import requests
-from rest_framework import serializers
-import xmltodict
-
-from ammico.models import Book, Slide
-from ammico.utils import fetchJson
-from config import URL_EXALEAD, URL_ORPHEO, URL_JAMESPOT
-
-
-class BookSerializer(serializers.ModelSerializer):
- count = serializers.SerializerMethodField('getCount')
-
- def getCount(self, book):
- return book.slides.all().count()
-
- class Meta:
- model = Book
- fields = ('id', 'user', 'idArticle', 'parent_visit', 'title', 'description', 'image', 'date', 'count', 'public')
-
-class SlideSerializer(serializers.ModelSerializer):
- details = serializers.SerializerMethodField('getStopInfo')
- index = serializers.SerializerMethodField('getOrder')
-
- def getStopInfo(self, slide):
- if (slide.idInventory != ""):
- #slide added from searched in MIMO database
- return extractFromMIMO(slide)
- else:
- #Get stop info from Oprheo
- #return extractFromOrpheo(slide)
- #Get stop info from Jamespot
- return extractFromJameSpot(slide)
-
- def getOrder(self, slide):
- return slide.book.get_slide_order().index(slide.id)
-
- class Meta:
- model = Slide
- fields = ('id', 'index', 'book', 'idStop', 'idInventory', 'title', 'description', 'image', 'date', 'favorite', 'details')
-
-
-def extractFromMIMO(slide):
- details = {}
- params = {'of': 'json', 'q': 'record_inventorynumber:' + slide.idInventory}
- data = requests.get(URL_EXALEAD, params=params)
- results = json.loads(data.content.decode('utf-8'))
- if (len(results['hits']) == 1):
- for i in results['hits'][0]['metas']:
- if('images' not in i):
- if ('name' in i):
- details[i['name']] = i['value']
- else:
- details.setdefault('images', []).append(i['images'][0]['value'])
- details['title'] = details.pop('name')
- return details
-
-def extractFromJameSpot(slide):
- details={}
- stopList = fetchJson(URL_JAMESPOT + '&f=list&o=article&type=stop&itemFormat=article')
- for stops in stopList:
- if (slide.idStop == stops['idStop']):
- details = stops
- details.setdefault('images', []).append(details.pop('firstImg'))
- details['description'] = details.pop('captionImg')
- return details
-
-def extractFromOrpheo(slide):
- details = {}
- params = {'id': slide.idStop.replace('stop-', '')}
- data = requests.get(URL_ORPHEO, params=params)
- parsed_data = xmltodict.parse(data.content.decode('utf-8'))
-
- if ('item' in parsed_data['result']):
- details = {
- 'title': parsed_data['result']['item']['title'],
- 'idInventory': parsed_data['result']['item']['Numero_inventaire'],
- }
- parser = MyHTMLParser()
-
- if (parsed_data['result']['item']['Description']):
- parser.feed(parsed_data['result']['item']['Description'])
- details['description']= parser.description
-
- if (parsed_data['result']['item']['Audio']):
- parser.feed(parsed_data['result']['item']['Audio'])
- details['images']= parser.images
- details['audio']= parser.audio
- details['captions']= parser.captions
-
- if (parsed_data['result']['item']['Video']):
- parser.feed(parsed_data['result']['item']['Video'])
- details['video']= parser.video
- details['images']= parser.images
-
- #if (parsed_data['result']['item']['Image']):
- # parser.feed(parsed_data['result']['item']['Image'])
- # details['image']= parser.caption
-
- return details
-
-#HTML parser to get the info from the Orpheo XML
-#Hopefully they'll update it because so far it's too slow
-#to parse the xml and then the html..
-#Better use extractFromJameSpot for now.
-class MyHTMLParser(HTMLParser):
-
- def __init__(self):
- self.starttag=''
- self.endtag=''
- self.audio=''
- self.video=''
- self.images=[]
- self.captions=[]
- self.description=''
- self.captionList=''
- HTMLParser.__init__(self)
-
- def handle_starttag(self, tag, attrs):
- if (self.starttag == 'audio' and tag == 'source'):
- for attr in attrs:
- if 'src' in attr:
- self.audio = attr[1]
- elif (tag == 'img'):
- for attr in attrs:
- if 'src' in attr:
- self.images.append(attr[1])
- elif (tag == 'video'):
- for attr in attrs:
- if 'poster' in attr:
- self.images.append(attr[1])
- if (self.starttag == 'video' and tag == 'source'):
- for attr in attrs:
- if 'src' in attr:
- print(attr)
- self.video = attr[1]
- self.starttag = tag
- def handle_endtag(self, tag):
- self.tag = tag
- def handle_data(self, data):
- if ('Caption_image' in data):
- self.captionList = True
- elif (self.captionList == True):
- self.captions.append(data)
- else:
- self.description = data
\ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/server/ammico/serializers/ammico.py Thu Jun 04 20:05:15 2015 +0200
@@ -0,0 +1,38 @@
+from rest_framework import serializers
+
+from ammico.models import Book, Slide
+from ammico.serializers.extractors import extractFromMIMO, extractFromJameSpot
+from ammico.serializers.taggit import TaggitSerializer, TagListSerializerField
+
+
+class BookSerializer(serializers.ModelSerializer):
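+    # Serializes a book plus the number of slides it contains (exposed as 'count').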
+ count = serializers.SerializerMethodField('getCount')
+
+ def getCount(self, book):
+ return book.slides.all().count()
+
+ class Meta:
+ model = Book
+ fields = ('id', 'user', 'idArticle', 'parent_visit', 'title', 'description', 'image', 'date', 'count', 'public')
+
+class SlideSerializer(TaggitSerializer, serializers.ModelSerializer):
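+    # Serializes a slide with its position in the book ('index'), its tags and the
+    # stop details fetched from either the MIMO database or Jamespot ('details').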
+ details = serializers.SerializerMethodField('getStopInfo')
+ index = serializers.SerializerMethodField('getOrder')
+ tags = TagListSerializerField(required=False)
+
+    def getStopInfo(self, slide):
+        if (slide.idInventory != ""):
+            # Slide added from a search in the MIMO database
+            return extractFromMIMO(slide)
+        else:
+            # Get stop info from Orpheo:
+            # return extractFromOrpheo(slide)
+            # Get stop info from Jamespot
+            return extractFromJameSpot(slide)
+
+ def getOrder(self, slide):
+ return slide.book.get_slide_order().index(slide.id)
+
+ class Meta:
+ model = Slide
+ fields = ('id', 'index', 'book', 'idStop', 'idInventory', 'title', 'description', 'image', 'date', 'favorite', 'tags', 'details')
\ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/server/ammico/serializers/extractors.py Thu Jun 04 20:05:15 2015 +0200
@@ -0,0 +1,68 @@
+import json
+
+import requests
+import xmltodict
+
+from ammico.utils import fetchJson, MyHTMLParser
+from config import URL_EXALEAD, URL_JAMESPOT, URL_ORPHEO
+
+
+def extractFromMIMO(slide):
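+    # Query the MIMO collection (Exalead endpoint) for the slide's inventory number
+    # and flatten the metas of the single matching hit into a details dict
+    # (named metas become key/value pairs, image metas go into an 'images' list).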
+ details = {}
+ params = {'of': 'json', 'q': 'record_inventorynumber:' + slide.idInventory}
+ data = requests.get(URL_EXALEAD, params=params)
+ results = json.loads(data.content.decode('utf-8'))
+ if (len(results['hits']) == 1):
+        for meta in results['hits'][0]['metas']:
+            if ('images' not in meta):
+                if ('name' in meta):
+                    details[meta['name']] = meta['value']
+            else:
+                details.setdefault('images', []).append(meta['images'][0]['value'])
+ details['title'] = details.pop('name')
+ return details
+
+def extractFromJameSpot(slide):
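+    # Fetch the list of stop articles from Jamespot and return the one matching the
+    # slide's idStop, with 'firstImg' moved into an 'images' list and 'captionImg'
+    # renamed to 'description'.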
+    details = {}
+    stopList = fetchJson(URL_JAMESPOT + '&f=list&o=article&type=stop&itemFormat=article')
+    for stop in stopList:
+        if (slide.idStop == stop['idStop']):
+            details = stop
+            details.setdefault('images', []).append(details.pop('firstImg'))
+            details['description'] = details.pop('captionImg')
+    return details
+
+def extractFromOrpheo(slide):
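+    # Fetch the stop from the Orpheo XML service and run the embedded HTML fragments
+    # (Description, Audio, Video) through MyHTMLParser to collect text, media URLs
+    # and captions. Kept as a fallback; extractFromJameSpot is used by default.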
+ details = {}
+ params = {'id': slide.idStop.replace('stop-', '')}
+ data = requests.get(URL_ORPHEO, params=params)
+ parsed_data = xmltodict.parse(data.content.decode('utf-8'))
+
+ if ('item' in parsed_data['result']):
+ details = {
+ 'title': parsed_data['result']['item']['title'],
+ 'idInventory': parsed_data['result']['item']['Numero_inventaire'],
+ }
+ parser = MyHTMLParser()
+
+        if (parsed_data['result']['item']['Description']):
+            parser.feed(parsed_data['result']['item']['Description'])
+            details['description'] = parser.description
+
+        if (parsed_data['result']['item']['Audio']):
+            parser.feed(parsed_data['result']['item']['Audio'])
+            details['images'] = parser.images
+            details['audio'] = parser.audio
+            details['captions'] = parser.captions
+
+        if (parsed_data['result']['item']['Video']):
+            parser.feed(parsed_data['result']['item']['Video'])
+            details['video'] = parser.video
+            details['images'] = parser.images
+
+ #if (parsed_data['result']['item']['Image']):
+ # parser.feed(parsed_data['result']['item']['Image'])
+ # details['image']= parser.caption
+
+ return details
\ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/server/ammico/serializers/taggit.py Thu Jun 04 20:05:15 2015 +0200
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+from rest_framework import serializers
+
+
+class TagListSerializerField(serializers.ListField):
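+    # Represents a tag manager (django-taggit style) as a plain list of tag names;
+    # accepts a list of strings (min. 3 characters each) on input.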
+    child = serializers.CharField(allow_blank=True, min_length=3)
+
+ def to_representation(self, obj):
+        if not isinstance(obj, list):
+ return [tag.name for tag in obj.all()]
+ return obj
+
+
+class TaggitSerializer(serializers.Serializer):
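+    # Mixin that pulls TagListSerializerField values out of validated_data before the
+    # default create()/update() runs, then applies them to the saved instance.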
+ def create(self, validated_data):
+ to_be_tagged, validated_data = self._pop_tags(validated_data)
+
+ tag_object = super(TaggitSerializer, self).create(validated_data)
+
+ return self._save_tags(tag_object, to_be_tagged)
+
+    def update(self, instance, validated_data):
+        to_be_tagged, validated_data = self._pop_tags(validated_data)
+
+        tag_object = super(TaggitSerializer, self).update(
+            instance, validated_data)
+
+ return self._save_tags(tag_object, to_be_tagged)
+
+ def _save_tags(self, tag_object, tags):
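+        # Add every submitted tag to the instance and remove existing tags that are
+        # no longer in the submitted list.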
+ for key in tags.keys():
+ tag_values = tags.get(key)
+ for tag in tag_values:
+ getattr(tag_object, key).add(tag)
+
+            for tag in getattr(tag_object, key).names():
+ if tag not in tag_values:
+ getattr(tag_object, key).remove(tag)
+
+ return tag_object
+
+ def _pop_tags(self, validated_data):
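+        # Split tag list fields out of validated_data so the underlying serializer
+        # never sees them; return them separately for _save_tags.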
+ to_be_tagged = {}
+
+ for key in self.fields.keys():
+ field = self.fields[key]
+ if isinstance(field, TagListSerializerField):
+ if key in validated_data:
+ to_be_tagged[key] = validated_data.pop(key)
+
+ return (to_be_tagged, validated_data)
\ No newline at end of file
--- a/server/ammico/utils.py Thu Jun 04 20:03:04 2015 +0200
+++ b/server/ammico/utils.py Thu Jun 04 20:05:15 2015 +0200
@@ -1,6 +1,9 @@
+from html.parser import HTMLParser
+import json
+
from django.core.cache import cache
import requests
-import json
+
def fetchJson(url):
cached = cache.get(url)
@@ -17,4 +20,49 @@
# Return the cached content
content = cached
- return json.loads(content.decode('utf-8'))['VAL']
\ No newline at end of file
+ return json.loads(content.decode('utf-8'))['VAL']
+
+# HTML parser used to extract info from the HTML fragments embedded in the Orpheo XML.
+# Parsing the XML and then the HTML is currently too slow, so extractFromJameSpot is
+# the preferred source for now (hopefully the Orpheo feed gets updated).
+class MyHTMLParser(HTMLParser):
+
+ def __init__(self):
+        self.starttag = ''
+        self.endtag = ''
+        self.audio = ''
+        self.video = ''
+        self.images = []
+        self.captions = []
+        self.description = ''
+        self.captionList = False
+ HTMLParser.__init__(self)
+
+ def handle_starttag(self, tag, attrs):
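+        # Collect media references: <source> src values inside <audio>/<video>,
+        # <img> src values and <video> poster images.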
+ if (self.starttag == 'audio' and tag == 'source'):
+ for attr in attrs:
+ if 'src' in attr:
+ self.audio = attr[1]
+ elif (tag == 'img'):
+ for attr in attrs:
+ if 'src' in attr:
+ self.images.append(attr[1])
+ elif (tag == 'video'):
+ for attr in attrs:
+ if 'poster' in attr:
+ self.images.append(attr[1])
+ if (self.starttag == 'video' and tag == 'source'):
+ for attr in attrs:
+ if 'src' in attr:
+ self.video = attr[1]
+        self.starttag = tag
+
+    def handle_endtag(self, tag):
+        self.endtag = tag
+
+ def handle_data(self, data):
+ if ('Caption_image' in data):
+ self.captionList = True
+        elif (self.captionList):
+ self.captions.append(data)
+ else:
+ self.description = data
\ No newline at end of file