package uk.bl.wa.parsers;

/*
 * #%L
 * warc-indexer
 * $Id:$
 * $HeadURL:$
 * %%
 * Copyright (C) 2013 - 2018 The webarchive-discovery project contributors
 * %%
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, either version 2 of the
 * License, or (at your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public
 * License along with this program.  If not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>.
 * #L%
 */

import java.io.IOException;
import java.io.InputStream;
import java.util.*;
import java.util.regex.Pattern;

import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.tika.exception.TikaException;
import org.apache.tika.metadata.Metadata;
import org.apache.tika.metadata.Property;
import org.apache.tika.mime.MediaType;
import org.apache.tika.parser.AbstractParser;
import org.apache.tika.parser.ParseContext;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.parser.ParseError;
import org.jsoup.parser.Parser;
import org.xml.sax.ContentHandler;
import org.xml.sax.SAXException;

import uk.bl.wa.util.Instrument;
import uk.bl.wa.util.Normalisation;

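/**
 * Tika parser that runs HTML through Jsoup and records simple page features
 * (outgoing links, optional image links, the first non-empty paragraph, the
 * OriginalPublicationDate meta value, the set of distinct element names and
 * the Jsoup parse-error count) as Tika {@link Metadata} properties.
 */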
public class HtmlFeatureParser extends AbstractParser {

    /** */
    private static final long serialVersionUID = 1631417895901342814L;

    private static Log log = LogFactory.getLog(HtmlFeatureParser.class);
    
    private static final Set<MediaType> SUPPORTED_TYPES =
            Collections.unmodifiableSet(new HashSet<MediaType>(Arrays.asList(
                    MediaType.text("html"),
                    MediaType.application("xhtml")
            )));

    // The parser to use, preferring the XML variation as it does not 'fix' the
    // mark-up.
    private Parser parser = Parser.xmlParser();
    // Max errors to return:
    private int max_errors;
    private final boolean normaliseLinks;
    private boolean extractImageLinks = false;
    
    public static final String ORIGINAL_PUB_DATE = "OriginalPublicationDate";
    // Explicit property for faster link handling, as it allows setting multiple values (same as LINKS?)
    public static final Property LINK_LIST = Property.internalTextBag("LinkList");
    public static final Property LINKS = Property.internalTextBag("LINK-LIST");
    public static final String FIRST_PARAGRAPH = "FirstParagraph";
    public static final Property IMAGE_LINKS = Property.internalTextBag("ImageLinks");
    public static final Property DISTINCT_ELEMENTS = Property.internalTextBag("DISTINCT-ELEMENTS");
    public static final Property NUM_PARSE_ERRORS = Property
            .internalInteger("Html-Parse-Error-Count");
    public static final int DEFAULT_MAX_PARSE_ERRORS = 1000;
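    // The text-bag properties above (LINK_LIST, IMAGE_LINKS, DISTINCT_ELEMENTS) are multi-valued;
    // consumers can read all values back with Metadata#getValues(Property) (illustrative usage note).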

    // Setting this to true also adds the field url_norm to the Solr document in WARCIndexer
    public static final String CONF_LINKS_NORMALISE = "warc.index.extract.linked.normalise";
    public static final String CONF_LINKS_EXTRACT_IMAGE_LINKS = "warc.index.extract.linked.images";
    public static final boolean DEFAULT_LINKS_NORMALISE = false;
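    // Illustrative HOCON snippet (Typesafe Config) enabling both options:
    //   warc.index.extract.linked.normalise = true
    //   warc.index.extract.linked.images = true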

    /**
     * Creates a parser with an empty configuration (defaults apply).
     */
    public HtmlFeatureParser() {
        this(ConfigFactory.empty());
    }

    public HtmlFeatureParser(Config conf) {
        normaliseLinks = conf.hasPath(CONF_LINKS_NORMALISE) ?
                conf.getBoolean(CONF_LINKS_NORMALISE) :
                DEFAULT_LINKS_NORMALISE;
        this.setMaxParseErrors(DEFAULT_MAX_PARSE_ERRORS);

        extractImageLinks = conf.hasPath(CONF_LINKS_EXTRACT_IMAGE_LINKS) ?
                conf.getBoolean(CONF_LINKS_EXTRACT_IMAGE_LINKS) :
                false;
    }
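    // Illustrative construction, assuming a Typesafe Config is available
    // (ConfigFactory.load() reads application.conf from the classpath):
    //   HtmlFeatureParser parser = new HtmlFeatureParser(ConfigFactory.load());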

    /**
     * @param max_errors the maximum number of Jsoup parse errors to track
     */
    public void setMaxParseErrors(int max_errors) {
        this.max_errors = max_errors;
        parser.setTrackErrors(max_errors);
    }

    /**
     * @return the maximum number of parse errors to track
     */
    public int getMaxParseErrors() {
        return this.max_errors;
    }
    
    /**
     * @return the parse errors recorded by the underlying Jsoup parser
     */
    public List<ParseError> getParseErrors() {
        return this.parser.getErrors();
    }

    /**
     * 
     */
    @Override
    public Set<MediaType> getSupportedTypes(ParseContext context) {
        return SUPPORTED_TYPES;
    }

    /**
     * Parses the HTML stream with Jsoup and records the extracted features
     * in the supplied {@link Metadata}.
     */
    @Override
    public void parse(InputStream stream, ContentHandler handler,
            Metadata metadata, ParseContext context) throws IOException,
            SAXException, TikaException {
        final long start = System.nanoTime();
        // Pick up the URL:
        String url = metadata.get( Metadata.RESOURCE_NAME_KEY );
        
        // Parse it using JSoup
        Document doc = null;
        try {
            doc = Jsoup.parse(stream, null, url, parser);
        } catch (java.nio.charset.IllegalCharsetNameException e ) {
            log.warn("Jsoup parse had to assume UTF-8: "+e);
            doc = Jsoup.parse(stream, "UTF-8", url );
        } catch( Exception e ) {
            log.error("Jsoup parse failed: "+e);
        } finally {
            if( doc == null ) return;
        }
        Instrument.timeRel("HTMLAnalyzer.analyze#parser", "HtmlFeatureParser.parse#jsoupparse", start);

        final long nonJsoupStart = System.nanoTime();
        // Record the number of errors found:
        if (parser.getErrors() != null)
            metadata.set(NUM_PARSE_ERRORS, parser.getErrors().size());

        // Get the links (no image links):
        Set<String> links = this.extractLinks(doc);
        if( links != null && links.size() > 0 ) {
            metadata.set(LINK_LIST, links.toArray(new String[links.size()]));
        }
        
        // Get the image links:
        if (extractImageLinks) {
            Set<String> imageLinks = this.extractImageLinks(doc);
            if (imageLinks != null && imageLinks.size() > 0) {
                metadata.set(IMAGE_LINKS, imageLinks.toArray(new String[imageLinks.size()]));
            }
        }
        
        // Get the publication date, from BBC pages:
        for( Element meta : doc.select("meta[name=OriginalPublicationDate]") ) {
            metadata.set(ORIGINAL_PUB_DATE, meta.attr("content"));
            //log.debug(ORIGINAL_PUB_DATE + ": " + meta.attr("content"));
        }
        
        // Grab the first paragraph with text, and extract the text:
        for( Element p : doc.select("p") )  {
            String pt = p.text();
            if( pt != null ) {
                pt = pt.trim();
                if( pt.length() > 0 ) {
                    metadata.set(FIRST_PARAGRAPH, p.text() );
                    //log.debug(FIRST_PARAGRAPH + ": " +p.text() );
                    break;
                }
            }
        }
        
        // Grab the list of distinct elements used in the page:
        Set<String> de = new HashSet<String>();
        for( Element e : doc.select("*") ) {
            // ELEMENT_NAME matching to weed out the worst false positives caused by JavaScript
            // This handles cases such as '