edu.berkeley.nlp.lm.NgramLanguageModel

package edu.berkeley.nlp.lm;

import java.util.ArrayList;
import java.util.List;
import java.util.Random;

import edu.berkeley.nlp.lm.collections.Counter;

/**
 * 
 * Base interface for an n-gram language model, which exposes only inefficient
 * convenience methods. See {@link ContextEncodedNgramLanguageModel} and
 * {@link ArrayEncodedNgramLanguageModel} for more efficient accessors.
 * 
 * @author adampauls
 * 
 * @param <W> the type of word objects used by this model
 * 
 */
public interface NgramLanguageModel<W>
{

	/**
	 * Maximum size of n-grams stored by the model.
	 * 
	 * @return the maximum n-gram order stored by the model
	 */
	public int getLmOrder();

	/**
	 * Each LM must have a WordIndexer which assigns integer IDs to each word W
	 * in the language.
	 * 
	 * @return the WordIndexer for this model
	 */
	public WordIndexer<W> getWordIndexer();

	/**
	 * Scores a complete sentence, taking appropriate care with the start- and
	 * end-of-sentence symbols. This is a convenience method and will generally
	 * be inefficient.
	 * 
	 * @return the log probability of the complete sentence
	 */
	public float scoreSentence(List<W> sentence);

	/**
	 * 
	 * Scores an n-gram. This is a convenience method and will generally be
	 * relatively inefficient. More efficient versions are available in
	 * {@link ArrayEncodedNgramLanguageModel#getLogProb(int[], int, int)} and
	 * {@link ContextEncodedNgramLanguageModel#getLogProb(long, int, int, edu.berkeley.nlp.lm.ContextEncodedNgramLanguageModel.LmContextInfo)}.
	 */
	public float getLogProb(List<W> ngram);
	
	/**
	 * Sets the (log) probability for an OOV word. Note that this is in general
	 * different from the log probability assigned to the unk tag itself.
	 */
	public void setOovWordLogProb(float logProb);

	public static class StaticMethods
	{

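		/**
		 * Converts an n-gram of word objects to an array of integer word IDs,
		 * mapping out-of-vocabulary words to the unknown-word ID.
		 */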
		public static <T> int[] toIntArray(final List<T> ngram, final ArrayEncodedNgramLanguageModel<T> lm) {
			final int[] ints = new int[ngram.size()];
			final WordIndexer<T> wordIndexer = lm.getWordIndexer();
			for (int i = 0; i < ngram.size(); ++i) {
				ints[i] = wordIndexer.getIndexPossiblyUnk(ngram.get(i));
			}
			return ints;
		}

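		/**
		 * Inverse of {@link #toIntArray(List, ArrayEncodedNgramLanguageModel)}:
		 * converts an array of integer word IDs back to a list of word objects.
		 */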
		public static <T> List<T> toObjectList(final int[] ngram, final ArrayEncodedNgramLanguageModel<T> lm) {
			final List<T> ret = new ArrayList<T>(ngram.length);
			final WordIndexer<T> wordIndexer = lm.getWordIndexer();
			for (int i = 0; i < ngram.length; ++i) {
				ret.add(wordIndexer.getWord(ngram[i]));
			}
			return ret;
		}

		/**
		 * Samples a sentence from this language model. This is not meant to be
		 * particularly efficient.
		 * 
		 * @param random source of randomness for sampling
		 * @param lm the language model to sample from
		 * @return a sampled sentence, with start and end symbols stripped
		 */
		public static <W> List<W> sample(Random random, final NgramLanguageModel<W> lm) {
			return sample(random, lm, 1.0);
		}

		public static <W> List<W> sample(Random random, final NgramLanguageModel<W> lm, final double sampleTemperature) {
			List<W> ret = new ArrayList<W>();
			ret.add(lm.getWordIndexer().getStartSymbol());
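			// Repeatedly sample the next word given the most recent (order - 1)
			// words, until the end-of-sentence symbol is drawn.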
			while (true) {
				final int contextEnd = ret.size();
				final int contextStart = Math.max(0, contextEnd - lm.getLmOrder() + 1);
				Counter<W> c = new Counter<W>();
				List<W> ngram = new ArrayList<W>(ret.subList(contextStart, contextEnd));
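				// Reserve a trailing slot that each candidate next word is swapped into.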
				ngram.add(null);
				for (int index = 0; index < lm.getWordIndexer().numWords(); ++index) {

					W word = lm.getWordIndexer().getWord(index);
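					// Never propose the start symbol, and do not allow the end
					// symbol until at least one real word has been generated.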
					if (word.equals(lm.getWordIndexer().getStartSymbol())) continue;
					if (ret.size() <= 1 && word.equals(lm.getWordIndexer().getEndSymbol())) continue;

					ngram.set(ngram.size() - 1, word);
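					// getLogProb returns a base-10 log probability, so the weight is
					// 10^(T * logP) = exp(T * logP * ln 10); sampleTemperature T < 1
					// flattens the distribution, T > 1 sharpens it.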
					c.setCount(word, Math.exp(sampleTemperature * lm.getLogProb(ngram) * Math.log(10)));
				}
				W sample = c.sample(random);
				ret.add(sample);
				if (sample.equals(lm.getWordIndexer().getEndSymbol())) break;

			}
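			// Drop the start symbol at index 0 and the end symbol at the last position.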
			return ret.subList(1, ret.size() - 1);
		}

	}

}
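
A minimal usage sketch (not part of the file above): it assumes a prebuilt
binary LM on disk, loaded via LmReaders.readLmBinary from berkeleylm's
edu.berkeley.nlp.lm.io package; the model path "eng.blm" and the class name
NgramLmDemo are placeholders.

import java.util.Arrays;
import java.util.List;
import java.util.Random;

import edu.berkeley.nlp.lm.NgramLanguageModel;
import edu.berkeley.nlp.lm.io.LmReaders;

public class NgramLmDemo {
	public static void main(String[] args) {
		// Load a binary LM over String words (the path is a placeholder).
		final NgramLanguageModel<String> lm = LmReaders.readLmBinary("eng.blm");

		// Score a complete sentence; start/end symbols are handled internally.
		final List<String> sentence = Arrays.asList("the", "cat", "sat");
		System.out.println("log P(sentence) = " + lm.scoreSentence(sentence));

		// Score a single n-gram directly (convenience method, relatively slow).
		System.out.println("log P(cat | the) = " + lm.getLogProb(Arrays.asList("the", "cat")));

		// Sample a random sentence from the model at temperature 1.0.
		final List<String> sampled = NgramLanguageModel.StaticMethods.sample(new Random(0), lm);
		System.out.println("sampled: " + sampled);
	}
}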