Check out the complete working example featured in "Scraping pages full of JavaScript". It uses Web::Scraper for HTML processing and Gtk3::WebKit to render the dynamic content. However, the latter is quite a PITA to install. If you don't have that many pages to scrape (say, fewer than 1000), fetching the post-processed DOM content through PhantomJS is an interesting option. I've written the following script for that purpose:
var page = require('webpage').create(),
    system = require('system'),
    fs = require('fs'),
    address, output;

// Expect exactly two arguments after the script name: URL and output filename
if (system.args.length !== 3) {
    console.log('Usage: phantomjs --load-images=no html.js URL filename');
    phantom.exit(1);
} else {
    address = system.args[1];
    output = system.args[2];
    page.open(address, function (status) {
        if (status !== 'success') {
            console.log('Unable to load the address!');
        } else {
            // Dump the post-processed DOM to the output file
            fs.write(output, page.content, 'w');
        }
        phantom.exit();
    });
}
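For completeness, here's a minimal sketch (not part of the original answer) of how you might drive that script from Perl and hand the rendered HTML to Web::Scraper. The target URL, the output file name, and the link-extracting scraper are made-up placeholders:

use strict;
use warnings;
use URI;
use Web::Scraper;

my $url  = 'http://example.com/js-heavy-page';   # hypothetical target
my $file = '/tmp/rendered.html';                 # hypothetical output file

# Let PhantomJS render the page and dump the post-processed DOM to a file
system('phantomjs', '--load-images=no', 'html.js', $url, $file) == 0
    or die "phantomjs failed: $?";

# Slurp the rendered HTML back in
open my $fh, '<', $file or die "can't read $file: $!";
my $html = do { local $/; <$fh> };
close $fh;

# Scrape it as if it were a plain static page
my $links = scraper {
    process 'a', 'urls[]' => '@href';
};
my $result = $links->scrape($html, URI->new($url));
print "$_\n" for @{ $result->{urls} || [] };

Passing the original URL as the base URI to scrape() makes relative links resolve to absolute ones.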
There's something like that on CPAN already: a module called Wight. I haven't tested it yet, though.