[FIX] website_multi: create sitemap per website
Before this commit, the first website that asked to generate
the sitemap (cached for 12h) would force its own domain on it, and pages
were not filtered by website.

Now, generate one sitemap per website; the page enumeration takes
website_id into consideration and uses key instead of xml_id.
JKE-be committed Dec 2, 2016
1 parent 944dd79 commit cf7da98
Showing 2 changed files with 99 additions and 3 deletions.
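
In outline, the controller below keeps one cached sitemap attachment per website and regenerates it once the attachment is older than the cache window. A minimal standalone sketch of that naming and expiry logic (plain Python; the helper names are illustrative, not the module's API, and SITEMAP_CACHE_TIME mirrors the website module's 12h constant):

import datetime

SITEMAP_CACHE_TIME = datetime.timedelta(hours=12)  # the website module's cache window

def sitemap_index_url(website_id):
    # one cached index attachment per website instead of a single shared one
    return '/sitemap-%d.xml' % website_id

def sitemap_page_url(website_id, page):
    # chunked sitemap files are namespaced by website as well
    return '/sitemap-%d-%d.xml' % (website_id, page)

def cache_is_valid(create_date, now=None):
    # regenerate once the stored attachment is older than the cache window
    return ((now or datetime.datetime.now()) - create_date) < SITEMAP_CACHE_TIME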
74 changes: 72 additions & 2 deletions website_multi/controllers/main.py
@@ -1,10 +1,12 @@
import re

import werkzeug

import openerp
import datetime
from itertools import islice
from openerp.addons.web import http
from openerp.http import request
from openerp.addons.website.controllers.main import Website
from openerp.addons.website.controllers.main import Website, SITEMAP_CACHE_TIME, LOC_PER_SITEMAP


class website_multi(Website):
@@ -42,3 +44,71 @@ def pagenew(self, path, noredirect=False, add_menu=None):
            return werkzeug.wrappers.Response(url, mimetype='text/plain')

        return werkzeug.utils.redirect(url)

    @http.route()
    def sitemap_xml_index(self):
        cr, uid, context = request.cr, openerp.SUPERUSER_ID, request.context
        current_website = request.website
        ira = request.registry['ir.attachment']
        iuv = request.registry['ir.ui.view']
        mimetype = 'application/xml;charset=utf-8'
        content = None

        def create_sitemap(url, content):
            ira.create(cr, uid, dict(
                datas=content.encode('base64'),
                mimetype=mimetype,
                type='binary',
                name=url,
                url=url,
            ), context=context)

        dom = [('url', '=', '/sitemap-%d.xml' % current_website.id), ('type', '=', 'binary')]
        sitemap = ira.search_read(cr, uid, dom, ('datas', 'create_date'), limit=1, context=context)

        if sitemap:
            # Check if the stored version is still valid
            server_format = openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT
            create_date = datetime.datetime.strptime(sitemap[0]['create_date'], server_format)
            delta = datetime.datetime.now() - create_date
            if delta < SITEMAP_CACHE_TIME:
                content = sitemap[0]['datas'].decode('base64')

        if not content:
            # Remove this website's sitemaps from ir.attachment as we're going to regenerate them
            dom = [('type', '=', 'binary'), '|', ('url', '=like', '/sitemap-%d-%%.xml' % current_website.id),
                   ('url', '=', '/sitemap-%d.xml' % current_website.id)]
            sitemap_ids = ira.search(cr, uid, dom, context=context)
            if sitemap_ids:
                ira.unlink(cr, uid, sitemap_ids, context=context)

            pages = 0
            first_page = None
            locs = current_website.sudo(user=current_website.user_id.id).enumerate_pages()
            while True:
                values = {
                    'locs': islice(locs, 0, LOC_PER_SITEMAP),
                    'url_root': request.httprequest.url_root[:-1],
                }
                urls = iuv.render(cr, uid, 'website.sitemap_locs', values, context=context)
                if urls.strip():
                    page = iuv.render(cr, uid, 'website.sitemap_xml', dict(content=urls), context=context)
                    if not first_page:
                        first_page = page
                    pages += 1
                    create_sitemap('/sitemap-%d-%d.xml' % (current_website.id, pages), page)
                else:
                    break
            if not pages:
                return request.not_found()
            elif pages == 1:
                content = first_page
            else:
                # Sitemaps must be split into several smaller files with a sitemap index
                content = iuv.render(cr, uid, 'website.sitemap_index_xml', dict(
                    pages=range(1, pages + 1),
                    url_root=request.httprequest.url_root,
                ), context=context)
            create_sitemap('/sitemap-%d.xml' % current_website.id, content)

        return request.make_response(content, [('Content-Type', mimetype)])
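
The while loop above relies on itertools.islice to consume the lazy enumerate_pages() generator in fixed-size chunks, stopping at the first empty render. A self-contained sketch of that chunking pattern (the 45000 value is assumed to match the website module's LOC_PER_SITEMAP of that era):

from itertools import islice

LOC_PER_SITEMAP = 45000  # assumed value of the upstream constant

def iter_chunks(locs, size=LOC_PER_SITEMAP):
    # pull at most `size` locations per pass; stop on the first empty
    # chunk, just like the `if urls.strip()` test in the controller
    locs = iter(locs)
    while True:
        chunk = list(islice(locs, size))
        if not chunk:
            return
        yield chunk

# each chunk would become one '/sitemap-<website>-<n>.xml' attachment
for n, chunk in enumerate(iter_chunks(['/page-a', '/page-b'], size=1), 1):
    print(n, chunk)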
28 changes: 27 additions & 1 deletion website_multi/models/website.py
@@ -4,7 +4,7 @@
from openerp.addons.website.models.website import slugify
from openerp.addons.web.http import request
from werkzeug.exceptions import NotFound

import werkzeug

class website(orm.Model):

@@ -114,3 +114,29 @@ def _auth_method_public(self):
            dummy, request.uid = self.pool['ir.model.data'].get_object_reference(request.cr, openerp.SUPERUSER_ID, 'base', 'public_user')
        else:
            request.uid = request.session.uid

    def _get_converters(self):
        converters = super(ir_http, self)._get_converters()
        converters['page'] = PageMultiWebsiteConverter
        return converters


class PageMultiWebsiteConverter(werkzeug.routing.PathConverter):
    def generate(self, cr, uid, query=None, args={}, context=None):
        View = request.registry['ir.ui.view']
        dom = [('page', '=', True), '|', ('website_id', '=', request.website.id), ('website_id', '=', False)]
        views = View.search_read(cr, uid, dom, fields=['key', 'xml_id', 'priority', 'write_date'], order='name', context=context)

        for view in views:
            key = view['key'] or view['xml_id'] or ''
            xid = key.startswith('website.') and key[8:] or key

            if xid == 'homepage': continue
            if query and query.lower() not in xid.lower(): continue
            record = {'loc': xid}
            if view['priority'] != 16:
                record['__priority'] = min(round(view['priority'] / 32.0, 1), 1)
            if view['write_date']:
                record['__lastmod'] = view['write_date'][:10]
            yield record
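
The key handling in generate() above is the behavioural core of the change: prefer the new key field over xml_id, strip the 'website.' prefix, and skip the homepage. A tiny standalone illustration (page_slug and sitemap_priority are hypothetical helper names):

def page_slug(key, xml_id=None):
    # prefer `key`, fall back to `xml_id`, drop the 'website.' module prefix
    key = key or xml_id or ''
    return key[8:] if key.startswith('website.') else key

def sitemap_priority(view_priority):
    # map ir.ui.view priority onto the sitemap's 0..1 scale, as above
    return min(round(view_priority / 32.0, 1), 1)

assert page_slug('website.contactus') == 'contactus'
assert page_slug(None, 'website.aboutus') == 'aboutus'
assert page_slug('website.homepage') == 'homepage'  # generate() skips this slug
assert sitemap_priority(32) == 1.0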
