talismane-machine-learning 6.1.2: reference.conf

talismane {
  ### These parameters concern training only
  ### It is assumed that after training, each persisted model will be self-contained in terms of its parameters
  machine-learning {
    generic {
      # Which algorithm to choose? options are MaxEnt, LinearSVM, Perceptron
      algorithm = LinearSVM

      # The minimum number of distinct events in which a feature must appear in order to be included in the model
      cutoff = 0

      # the number of training iterations
      iterations = 100

      # Parameters for linear-svm family models
      LinearSVM {
        # The solver type. Options are:
        # L2R_LR: L2-regularized logistic regression (primal)
        # L1R_LR: L1-regularized logistic regression
        # L2R_LR_DUAL: L2-regularized logistic regression (dual)
        solver-type = "L2R_LR"

        # Parameter C, typical values are powers of 2, from 2^-5 to 2^5
        cost = 1.0

        # Parameter epsilon, typical values are 0.01, 0.05, 0.1, 0.5
        epsilon = 0.1

        # should we treat each outcome explicitly as one vs. rest, allowing an event to have multiple outcomes?
        one-vs-rest = false

        # If one vs. rest is used, should we balance the event counts so that
        # the current outcome events are approximately proportional to the
        # other outcome events?
        balance-event-counts = false
      }

      # Parameters for Maximum Entropy models
      MaxEnt {
        # Sigma for Gaussian smoothing during maxent training.
        sigma = 0.0

        # Additive smoothing parameter during maxent training.
        smoothing = 0.0
      }

      # Parameters for perceptron models
      Perceptron {
        # Exit training early if accuracy hasn't significantly changed in 3 iterations, where "significantly" is defined by this tolerance
        tolerance = 1e-5

        # If true, will only average for iterations <= 20 and then for all perfect
        # squares (25, 36, 49, 64, 81, 100, etc.).
        average-at-intervals = false

        # method for scoring perceptron classifiers.
        # * additive: Use standard additive perceptron scoring, where each state's score is
        #      the sum of scores of incremental states.
        # * normalisedLinear:  Use a geometric mean of state probabilities, where the probability is
        #      calculated by first transforming all scores to positive (minimum = 1),
        #      and then dividing by the total.
        # * normalisedExponential: Use a geometric mean of state probabilities, where the probability is
        #      e^{score/absmax(scores)}, where absmax is the maximum absolute value
        #      of scores. This gives us positive scores from 1/e to e. We then
        #      divide by the total.
        scoring = "additive"

        # iterations at which the perceptron model should be saved
        observation-points = []
      }
    }
  }
}
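
The file above ships inside the talismane-machine-learning jar as packaged defaults. Since the syntax is HOCON and the defaults live in a reference.conf, the usual Typesafe Config convention applies: an application overrides only the keys it cares about, typically in an application.conf on its classpath, and every other key falls back to the values shown above. The sketch below is illustrative only; the chosen values (MaxEnt, a cutoff of 3, 200 iterations, sigma = 1.0) are assumptions for the example, not recommended settings.

# application.conf (hypothetical): override only what differs from the defaults
talismane {
  machine-learning {
    generic {
      algorithm = MaxEnt   # switch from the default LinearSVM
      cutoff = 3           # require features to appear in 3 distinct events (per the cutoff comment above)
      iterations = 200

      MaxEnt {
        sigma = 1.0        # Gaussian smoothing sigma
      }
    }
  }
}

Any key not mentioned in such an override (for example LinearSVM.cost or Perceptron.tolerance) keeps the default value from the reference file above.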