com.meliorbis.economics.aggregate.derivagg.DerivativeAggregationSolver (ModelSolver)
A library for solving economic models, particularly macroeconomic models with heterogeneous agents who have model-consistent expectations.
package com.meliorbis.economics.aggregate.derivagg;
import static com.meliorbis.numerics.DoubleArrayFactories.createArrayOfSize;
import static com.meliorbis.numerics.IntArrayFactories.createIntArrayOfSize;
import static com.meliorbis.numerics.generic.primitives.impl.DoubleArrayFunctions.log;
import static com.meliorbis.numerics.generic.primitives.impl.DoubleArrayFunctions.maximumRelativeDifference;
import static com.meliorbis.numerics.generic.primitives.impl.Interpolation.interpolateFunction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.DoubleStream;
import org.apache.commons.lang.ArrayUtils;
import com.meliorbis.economics.aggregate.ByShockLevelAggregateSolver;
import com.meliorbis.economics.infrastructure.simulation.DiscretisedDistribution;
import com.meliorbis.economics.infrastructure.simulation.DiscretisedDistributionSimulator;
import com.meliorbis.economics.infrastructure.simulation.TransitionRecord;
import com.meliorbis.economics.model.Model;
import com.meliorbis.economics.model.ModelConfig;
import com.meliorbis.economics.model.ModelException;
import com.meliorbis.economics.model.StateWithControls;
import com.meliorbis.numerics.DoubleArrayFactories;
import com.meliorbis.numerics.function.primitives.DoubleGridFunction;
import com.meliorbis.numerics.function.primitives.DoubleGridFunctionFactory;
import com.meliorbis.numerics.generic.IntegerArray;
import com.meliorbis.numerics.generic.primitives.DoubleArray;
import com.meliorbis.numerics.generic.primitives.DoubleNaryOp;
import com.meliorbis.numerics.generic.primitives.DoubleUnaryOp;
import com.meliorbis.utils.Pair;
import com.meliorbis.utils.Utils;
/**
* Base class for solvers that use derivative aggregation to update the aggregate transition rule. This class handles all the
* mechanics of derivative aggregation; subclasses must implement the abstract methods that provide the basic input derivatives for
* first-order aggregation, and can also override those required for second-order aggregation.
*
* @author Tobias Grasl
*
* @param <C> The configuration class
* @param <S> The state class
* @param <M> The model class
*/
public abstract class DerivativeAggregationSolver, M extends Model> extends
ByShockLevelAggregateSolver
{
private static final Logger LOG = Logger.getLogger(DerivativeAggregationSolver.class.getName());
final private DoubleGridFunctionFactory _functionFactory;
private List> _policyFunctionInputs;
final protected DiscretisedDistributionSimulator _simulator;
private DoubleArray> _simulationStates;
private boolean _useLogs = false;
private boolean _secondOrder = false;
private DoubleArray>[] _lastDerivs = null;
private double _derivError = Double.POSITIVE_INFINITY;
final private int _nIndStates;
final private int _nAggStates;
final private int _nAggControls;
protected DerivativeAggregationSolver(M model_, C config_, DiscretisedDistributionSimulator simulator_)
{
super(model_, config_);
_simulator = simulator_;
_functionFactory = new DoubleGridFunctionFactory();
_policyFunctionInputs = new ArrayList>();
_policyFunctionInputs.addAll(_config.getIndividualEndogenousStatesForSimulation());
_policyFunctionInputs.addAll(_config.getIndividualExogenousStates());
_policyFunctionInputs.addAll(_config.getAggregateEndogenousStates());
_policyFunctionInputs.addAll(_config.getAggregateControls());
// Does not include aggregate shocks because those are selected away
// before constructing the fn!
// TODO: WRONG! Should use appropriate slice of normalised start of period states
// NOTE: This approach only handles single-individual-state models at the time of writing
_simulationStates = _model.createSimulationGrid();
_simulationStates.fillDimensions(_config.getIndividualEndogenousStatesForSimulation().get(0), 0);
// Extract some useful info from config
_nIndStates = _config.getIndividualEndogenousStateCount();
_nAggStates = _config.getAggregateEndogenousStateCount();
_nAggControls = _config.getAggregateControlCount();
}
public void useLogs(boolean useLogs_)
{
_useLogs = useLogs_;
}
public void secondOrder(boolean secondOrder_)
{
if(secondOrder_ && _nAggControls != 0)
{
throw new RuntimeException("Second order approximation not yet implemented for models "
+ "with controls!");
}
_secondOrder = secondOrder_;
}
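/*
* A minimal usage sketch for the two switches above; the concrete solver type and its
* constructor arguments are hypothetical placeholders, only useLogs(..) and secondOrder(..)
* come from this class:
*
*   MyDerivAggSolver solver = new MyDerivAggSolver(model, config, simulator);
*   solver.useLogs(true);      // approximate the aggregate transition in logs
*   solver.secondOrder(true);  // only allowed for models without aggregate controls
*/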
@Override
protected Pair, DoubleArray>> calculateTransitionForShocks(int[] currentShockIndexes_, S state_) throws ModelException
{
return deriveAggregateTransition(currentShockIndexes_,state_);
}
final class DAValues
{
final DiscretisedDistribution _currentDistribution;
DiscretisedDistribution _futureDistribution;
final int[] _currentShocks;
final double[] _currentAggs;
final double[] _currentControls;
double[] _futureAggs;
DoubleArray>[][] dx_dTheta;
DoubleArray>[][] dx_dTheta_of;
DoubleArray>[][] dF_dx;
DoubleArray>[][] dF_dx_of;
DoubleArray>[][] future_dF_dx;
DoubleArray>[][] future_dF_dx_of;
DoubleArray>[][] future_dF_dX;
DoubleArray>[][] future_dF_dX_of;
double[] dX_dTheta;
DoubleArray>[][] dx_dX;
DoubleArray>[][] dx_dX_of;
DoubleGridFunction indStatesPolicy;
DoubleGridFunction indControlsPolicy;
int nIndControls = 0;
DoubleArray>[][] controlAggregationDerivatives;
DoubleArray>[][] controlAggregationByAggDerivatives;
DoubleArray>[][] controlAggregationDerivatives_of;
DoubleArray>[][] controlAggregationByAggDerivatives_of;
DoubleGridFunction[] df_dX;
DoubleArray>[] df_dX_of;
DoubleGridFunction[] df_dC;
DoubleArray>[] df_dC_of;
DoubleGridFunction[] df_dx;
DoubleArray>[] df_dx_of;
DoubleArray>[][] dc_dX;
DoubleArray>[][] dc_dX_of;
DoubleArray>[][] dc_dC;
DoubleArray>[][] dc_dC_of;
DoubleArray>[][] dc_dx;
DoubleArray>[][] dc_dx_of;
int firstAggState;
double[] aggVarSelector;
DoubleArray> dC_dX;
DoubleArray> dXp_dX;
DoubleArray> d2Xp_dX2;
public DAValues(int[] currentShocks_,
DiscretisedDistribution currentDistribution_,
double[] currentAggs_, double[] currentControls_)
{
_currentShocks = currentShocks_;
_currentDistribution = currentDistribution_;
_currentAggs = currentAggs_;
_currentControls = currentControls_;
int nAggStates = _config.getAggregateEndogenousStateCount();
int nIndStates = _config.getIndividualEndogenousStateCount();
dF_dx = new DoubleArray>[nAggStates][nIndStates];
dF_dx_of = new DoubleArray>[nAggStates][nIndStates];
future_dF_dx = new DoubleArray>[nAggStates][nIndStates];
future_dF_dx_of = new DoubleArray>[nAggStates][nIndStates];
future_dF_dX = new DoubleArray>[nAggStates][nAggStates];
future_dF_dX_of = new DoubleArray>[nAggStates][nAggStates];
dx_dX = new DoubleArray>[nAggStates][nIndStates];
dx_dX_of = new DoubleArray>[nAggStates][nIndStates];
dx_dTheta = new DoubleArray>[nAggStates][nIndStates];
dx_dTheta_of = new DoubleArray>[nAggStates][nIndStates];
dX_dTheta = new double[nAggStates];
df_dX = new DoubleGridFunction[_nAggStates];
df_dX_of = new DoubleArray>[_nAggStates];
df_dC = new DoubleGridFunction[_nAggControls];
df_dC_of = new DoubleArray>[_nAggControls];
df_dx = new DoubleGridFunction[_nIndStates];
df_dx_of = new DoubleArray>[_nIndStates];
dC_dX = createArrayOfSize(_nAggControls,_nAggStates);
dXp_dX = createArrayOfSize(_nAggStates,_nAggStates);
if(_secondOrder)
{
d2Xp_dX2 = createArrayOfSize(_nAggStates, _nAggStates,_nAggStates);
}
aggVarSelector = Utils.repeatArray(Double.NaN, nIndStates + _config.getIndividualExogenousStateCount() + _nAggStates + _nAggControls);
firstAggState = _model.getAggDetStateStart();
// Derivation of aggregate states happens at the value for the aggregates
System.arraycopy(currentAggs_,0, aggVarSelector, firstAggState, _nAggStates);
System.arraycopy(currentControls_,0, aggVarSelector, firstAggState+_nAggStates, _nAggControls);
}
}
void collect_f(DAValues values_, S state_)
{
// Get the individual transition just for the relevant shock
DoubleArray> fs = state_.getIndividualPolicyForSimulation();
int[] exoStateSelector = Utils.repeatArray(-1, fs.numberOfDimensions());
System.arraycopy(values_._currentShocks, 0,
exoStateSelector, _model.getAggStochStateStart(), values_._currentShocks.length);
// Also take the middle value of any normalising/permanent shocks - by design, these do not affect the current period but are
// only used to calculate the future expectations.
for(int permShockIndex = 0; permShockIndex < _config.getAggregateNormalisingStateCount();
permShockIndex++)
{
DoubleArray> permShockValues =
_config.getAggregateNormalisingExogenousStates().get(permShockIndex);
exoStateSelector[_model.getAggStochStateStart() +
values_._currentShocks.length + permShockIndex] =
permShockValues.numberOfElements()/2;
}
values_.indStatesPolicy = _functionFactory.createFunction(_policyFunctionInputs, fs.at(exoStateSelector));
// We also need to derive controls, if they are present
if( _nAggControls > 0 )
{
DoubleArray> fc = ((StateWithControls>) state_).
getIndividualControlsPolicyForSimulation();
values_.indControlsPolicy =
_functionFactory.createFunction(_policyFunctionInputs, fc.at(exoStateSelector));
// The number of controls is the number of outputs of the controls policy, which is in the highest dimension
values_.nIndControls = values_.indControlsPolicy.getDimensionality();
values_.dc_dX = new DoubleArray>[values_.nIndControls][_nAggStates];
values_.dc_dX_of = new DoubleArray>[values_.nIndControls][_nAggStates];
values_.dc_dC = new DoubleArray>[values_.nIndControls][_nAggControls];
values_.dc_dC_of = new DoubleArray>[values_.nIndControls][_nAggControls];
values_.dc_dx = new DoubleArray>[values_.nIndControls][_nIndStates];
values_.dc_dx_of = new DoubleArray>[values_.nIndControls][_nIndStates];
// if(values_.nIndControls > 1)
// {
// throw new UnsupportedOperationException("Multiple individual controls "
// + "do not work yet due to overflow handling");
// }
/* Initialise the arrays which hold derivatives of control aggregation
*/
if( _nAggControls > 0 )
{
values_.controlAggregationDerivatives =
new DoubleArray>[_nAggControls][values_.nIndControls];
values_.controlAggregationByAggDerivatives =
new DoubleArray>[_nAggControls][_nAggStates];
values_.controlAggregationDerivatives_of =
new DoubleArray>[_nAggControls][values_.nIndControls];
values_.controlAggregationByAggDerivatives_of =
new DoubleArray>[_nAggControls][_nAggStates];
}
}
}
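/*
* Illustration of the selector built above, assuming -1 acts as a "keep this dimension"
* wildcard in at(..) and assuming the policy dimensions are ordered
* [indEndo, indExo, aggEndo, aggControls, aggShocks, permShocks]: with current shocks {1}
* and a 5-point normalising shock grid the selector becomes
*
*   exoStateSelector = { -1, -1, -1, -1, 1, 2 }
*
* i.e. the aggregate shock dimension is pinned to index 1 and the permanent shock to its
* middle index (5/2 = 2), while all remaining dimensions are retained as inputs of the
* policy function.
*/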
/**
* Derive the aggregate transition for the given shock values, current
* calculation state and based on the provided distribution
*
* @param currentShockIndexes_ The index of each aggregate shock, indicating
* the values for which the derivation should be performed
* @param state_ The current calculation state
*
* @return The newly derived transition
*
* @throws ModelException In the event of failure
*/
protected Pair,DoubleArray>> deriveAggregateTransition(
final int[] currentShockIndexes_, S state_) throws ModelException
{
DiscretisedDistribution simState = state_.getDistributionForState(currentShockIndexes_);
// The particular shock value chosen does not affect the outcome, because the individual
// transition is invariant under the aggregate shocks; but a value with a non-zero
// probability of being reached must be chosen.
IntegerArray> shocksArray = createIntArrayOfSize(currentShockIndexes_.length);
shocksArray.fill(ArrayUtils.toObject(currentShockIndexes_));
IntegerArray> futureShocks = createIntArrayOfSize(currentShockIndexes_.length+_config.getAggregateNormalisingStateCount());
for (int i = 0; i < currentShockIndexes_.length; i++)
{
futureShocks.set(currentShockIndexes_[i],i);
}
final TransitionRecord transitionRecord = _simulator.simulateTransition(simState, this._model, state_, shocksArray, futureShocks);
/*
* Calculate the current aggregate states of the economy, which is the
* point at which the derivative will be taken
*/
final double[] aggregateStates = transitionRecord.getStates().toArray();
final double[] aggregateControls = transitionRecord.getControls().toArray();
/*
* Calculate the impact of transformations on individual variables
*/
// First, get the basic derivatives that are needed
if(_nIndStates > 1)
{
throw new UnsupportedOperationException("Multiple individual states do not work yet due to overflow handling");
}
DAValues values = new DAValues(currentShockIndexes_,
(DiscretisedDistribution)simState,
aggregateStates, aggregateControls);
values._futureDistribution = transitionRecord.getResultingDistribution();
// The Y values at the point at which the approximation is taken
values._futureAggs = _model.calculateAggregateStates(values._futureDistribution, shocksArray, state_);
/* Collect transformation and aggregation derivatives
*/
collect_dx_dTheta(values);
collect_dF_dx(values);
/* Calculate the implied derivative of aggregate variables by their transformation
* parameter
*/
calc_dX_dTheta(values);
/* Calculate the consequent derivatives of individual states by aggregate states
*/
calc_dx_dX(values);
/* Grab the individual policy function(s)
*/
collect_f(values, state_);
/*
* Now calculate the derivatives of the individual policy w.r.t. both
* individual and aggregate states
*/
if(_nAggControls > 0)
{
collect_dD_dc(values);
}
/*
* Calculate the derivatives of the individual states policy function w.r.t the
* aggregate states
*/
calc_df(values);
calc_dc(values);
/* First, if necessary calculate the derivatives of aggregate controls by aggregate states
*/
if(_nAggControls > 0)
{
calc_dC_dX(values);
}
/* Now we need to aggregate the future aggregates from future individual vars,
* and the aggregation derivatives may have changed - recalculate
*/
calc_future_dF(values);
calc_dXp_dX(values);
// Calculate the second order derivatives if necessary
if(_secondOrder)
{
calcSecondDerivs(values);
if(_lastDerivs != null)
{
if(_lastDerivs[currentShockIndexes_[0]] != null)
{
double currentError = maximumRelativeDifference(_lastDerivs[currentShockIndexes_[0]], values.d2Xp_dX2);
//if(currentError < _derivError)
{
_derivError = currentError;
}
}
}
else
{
_lastDerivs = new DoubleArray[_config.getAggregateEndogenousStates().get(0).numberOfElements()];
}
_lastDerivs[currentShockIndexes_[0]] = values.d2Xp_dX2.copy();
}
List transitionDimensions = new ArrayList();
Utils.addLengthsToList(transitionDimensions, _config.getAggregateEndogenousStates());
transitionDimensions.add(_nAggStates);
DoubleArray> newTransition = createArrayOfSize(transitionDimensions);
DoubleArray> predictedControls = null;
if(_nAggControls > 0) {
transitionDimensions.set(transitionDimensions.size()-1, _nAggControls);
predictedControls = createArrayOfSize(transitionDimensions);
fillTransitionSingleLog(predictedControls, aggregateStates, aggregateControls, values.dC_dX);
}
state_.setDerivatives(currentShockIndexes_,
DoubleArrayFactories.createArray(aggregateStates),
DoubleArrayFactories.createArray(values._futureAggs),
values.dXp_dX.copy(),
_secondOrder ? values.d2Xp_dX2.copy() : null);
if(LOG.isLoggable(Level.FINE)) {
double[] array = ArrayUtils.addAll(ArrayUtils.addAll(aggregateStates, values._futureAggs),
values.dXp_dX.toArray());
if(_secondOrder) {
array = ArrayUtils.addAll(array, values.d2Xp_dX2.toArray());
}
LOG.fine(String.format("DA Values [Shocks: %s]: %s",
Arrays.toString(currentShockIndexes_),Arrays.toString(array)));
}
if(!_useLogs)
{
fillTransition(newTransition, aggregateStates, values._futureAggs,
values.dXp_dX,values.d2Xp_dX2);
}
else
{
fillTransitionLogs(newTransition, aggregateStates, values._futureAggs,
values.dXp_dX, values.d2Xp_dX2, currentShockIndexes_[0]);
}
return new Pair,DoubleArray>>(newTransition, predictedControls);
}
void calcSecondDerivs(DAValues values_)
{
/* Double derivatives of the individual policy by individual states
*/
DoubleGridFunction[][] doubleIndDerivs = new DoubleGridFunction[_nIndStates][_nIndStates];
DoubleArray>[][] doubleIndDerivs_of = new DoubleArray>[_nIndStates][_nIndStates];
/* Double derivatives of the individual policy by one individual and one aggregate state
*/
DoubleGridFunction[][] doubleIndAggDerivs = new DoubleGridFunction[_nIndStates][_nAggStates];
DoubleArray>[][] doubleIndAggDerivs_of = new DoubleArray>[_nIndStates][_nAggStates];
for (int indIndex = 0; indIndex < _nIndStates; indIndex++)
{
// Double derivatives of the individual policy by individual states
// Only need to calculate them from indIndex upward by Young's theorem
for (int secIndIndex = indIndex; secIndIndex < _nIndStates; secIndIndex++)
{
doubleIndDerivs[indIndex][secIndIndex] = values_.df_dx[indIndex].partialDerivative(secIndIndex);
// Interpolate to get the overflow values
doubleIndDerivs_of[indIndex][secIndIndex] = overflowValues(doubleIndDerivs[indIndex][secIndIndex], 0, values_._currentDistribution);
if(indIndex != secIndIndex)
{
// Reference them from the equivalent point (Young's theorem)
doubleIndDerivs[secIndIndex][indIndex] = doubleIndDerivs[indIndex][secIndIndex];
doubleIndDerivs_of[secIndIndex][indIndex] = doubleIndDerivs_of[indIndex][secIndIndex];
}
}
/* Here we use the already calculated partial derivative by the aggregate, because the one by the
* ind state has been restricted in the aggregate dimension
*/
// Double derivatives of the individual policy by one individual and one aggregate state
for (int aggIndex = 0; aggIndex < _nAggStates; aggIndex++)
{
doubleIndAggDerivs[indIndex][aggIndex] = values_.df_dX[aggIndex].restrict(values_.aggVarSelector).partialDerivative(indIndex);
// Interpolate to get the overflow values
doubleIndAggDerivs_of[indIndex][aggIndex] = overflowValues(doubleIndAggDerivs[indIndex][aggIndex], 0, values_._currentDistribution);
}
}
/* Double derivatives of the individual policy by aggregate states
*/
DoubleGridFunction[][] doubleAggDerivs = new DoubleGridFunction[_nAggStates][_nAggStates];
DoubleArray>[][] doubleAggDerivs_of = new DoubleArray>[_nAggStates][_nAggStates];
for (int aggIndex = 0; aggIndex < _nAggStates; aggIndex++)
{
// Double derivatives of the individual policy by aggregate states
// Only need to calculate them from aggIndex upward by Young's theorem
for (int aggIndex2 = aggIndex; aggIndex2 < _nAggStates; aggIndex2++)
{
doubleAggDerivs[aggIndex][aggIndex2] = values_.df_dX[aggIndex].partialDerivative(values_.firstAggState+aggIndex2,values_.aggVarSelector);
doubleAggDerivs_of[aggIndex][aggIndex2] = overflowValues(doubleAggDerivs[aggIndex][aggIndex2], 0, values_._currentDistribution);
if(aggIndex != aggIndex2)
{
doubleAggDerivs[aggIndex2][aggIndex] = doubleAggDerivs[aggIndex][aggIndex2];
doubleAggDerivs_of[aggIndex2][aggIndex] = doubleAggDerivs_of[aggIndex][aggIndex2];
}
}
}
DoubleArray>[][][] doubleThetaDerivatives = new DoubleArray>[_nAggStates][_nAggStates][_nIndStates];
DoubleArray>[][][] doubleThetaDerivatives_of = new DoubleArray>[_nAggStates][_nAggStates][_nIndStates];
for (int aggIndex = 0; aggIndex < _nAggStates; aggIndex++)
{
for (int aggIndex2 = aggIndex; aggIndex2 < _nAggStates; aggIndex2++)
{
for (int indIndex = 0; indIndex < _nIndStates; indIndex++)
{
doubleThetaDerivatives[aggIndex][aggIndex2][indIndex] = doubleDeriveIndividualTransformationByTheta(aggIndex,aggIndex2,indIndex,values_._currentShocks, values_._currentAggs);
doubleThetaDerivatives_of[aggIndex][aggIndex2][indIndex] = overflowValues(doubleThetaDerivatives[aggIndex][aggIndex2][indIndex], 0, values_._currentDistribution);
if(aggIndex != aggIndex2)
{
doubleThetaDerivatives[aggIndex2][aggIndex][indIndex] = doubleThetaDerivatives[aggIndex][aggIndex2][indIndex];
doubleThetaDerivatives_of[aggIndex2][aggIndex][indIndex] = doubleThetaDerivatives_of[aggIndex][aggIndex2][indIndex];
}
}
}
}
DoubleNaryOp sum = summands -> {
return DoubleStream.of(summands).sum();
};
/* Now we can start adding stuff up
*/
DoubleArray>[][] policyByIndAggDerivs = new DoubleArray>[_nIndStates][_nAggStates];
DoubleArray>[][] policyByIndAggDerivs_of = new DoubleArray>[_nIndStates][_nAggStates];
for (int aggIndex = 0; aggIndex < _nAggStates; aggIndex++)
{
for (int indIndex = 0; indIndex < _nIndStates; indIndex++)
{
policyByIndAggDerivs[indIndex][aggIndex] = doubleIndAggDerivs[indIndex][aggIndex].getValues().copy();
policyByIndAggDerivs_of[indIndex][aggIndex] = doubleIndAggDerivs_of[indIndex][aggIndex].copy();
for (int secIndIndex = indIndex; secIndIndex < _nIndStates; secIndIndex++)
{
policyByIndAggDerivs[indIndex][aggIndex].modifying().add(values_.dx_dX[aggIndex][secIndIndex].multiply(doubleIndDerivs[indIndex][secIndIndex].getValues()));
policyByIndAggDerivs_of[indIndex][aggIndex].modifying().add(values_.dx_dX_of[aggIndex][secIndIndex].multiply(doubleIndDerivs_of[indIndex][secIndIndex]));
}
}
}
DoubleArray>[][] policyByAggAggDerivs = new DoubleArray>[_nAggStates][_nAggStates];
DoubleArray>[][] policyByAggAggDerivs_of = new DoubleArray>[_nAggStates][_nAggStates];
for (int aggIndex = 0; aggIndex < _nAggStates; aggIndex++)
{
for (int aggIndex2 = aggIndex; aggIndex2 < _nAggStates; aggIndex2++)
{
policyByAggAggDerivs[aggIndex][aggIndex2] = doubleAggDerivs[aggIndex][aggIndex2].getValues().copy();
policyByAggAggDerivs_of[aggIndex][aggIndex2] = doubleAggDerivs_of[aggIndex][aggIndex2].copy();
for (int indIndex = 0; indIndex < _nIndStates; indIndex++)
{
policyByAggAggDerivs[aggIndex][aggIndex2].modifying().add(values_.dx_dX[aggIndex2][indIndex].multiply(doubleIndAggDerivs[indIndex][aggIndex].getValues()));
policyByAggAggDerivs_of[aggIndex][aggIndex2].modifying().add(values_.dx_dX_of[aggIndex2][indIndex].multiply(doubleIndAggDerivs_of[indIndex][aggIndex]));
}
policyByAggAggDerivs[aggIndex2][aggIndex] = policyByAggAggDerivs[aggIndex][aggIndex2];
policyByAggAggDerivs_of[aggIndex2][aggIndex] = policyByAggAggDerivs_of[aggIndex][aggIndex2];
}
}
/* TODO: Aggregation double deriv calculation !!!
*/
double[][] aggDoubleThetaDerivs = new double[_nAggStates][_nAggStates];
for (int aggIndex = 0; aggIndex < _nAggStates; aggIndex++)
{
for (int aggIndex2 = aggIndex; aggIndex2 < _nAggStates; aggIndex2++)
{
// The impact from all the individual arrays can be added in one go!
DoubleArray> totalImpact = _nIndStates == 1 ? doubleThetaDerivatives[aggIndex][aggIndex2][0] :
doubleThetaDerivatives[aggIndex][aggIndex2][0].with(Arrays.copyOfRange(doubleThetaDerivatives[aggIndex][aggIndex2],1,_nIndStates)).map(sum);
DoubleArray> totalImpact_of = _nIndStates == 1 ? doubleThetaDerivatives_of[aggIndex][aggIndex2][0] :
doubleThetaDerivatives_of[aggIndex][aggIndex2][0].with(Arrays.copyOfRange(doubleThetaDerivatives_of[aggIndex][aggIndex2],1,_nIndStates)).map(sum);
aggDoubleThetaDerivs[aggIndex2][aggIndex] = aggDoubleThetaDerivs[aggIndex][aggIndex2] =
totalImpact.multiply(values_._currentDistribution._density).sum() + totalImpact_of.multiply(values_._currentDistribution._overflowProportions).sum();
}
}
DoubleArray>[][][] indByAggDoubleDerivs = new DoubleArray[_nIndStates][_nAggStates][_nAggStates];
DoubleArray>[][][] indByAggDoubleDerivs_of = new DoubleArray[_nIndStates][_nAggStates][_nAggStates];
for (int aggIndex = 0; aggIndex < _nAggStates; aggIndex++)
{
for (int aggIndex2 = aggIndex; aggIndex2 < _nAggStates; aggIndex2++)
{
for (int indIndex = 0; indIndex < _nIndStates; indIndex++)
{
indByAggDoubleDerivs[indIndex][aggIndex2][aggIndex] = indByAggDoubleDerivs[indIndex][aggIndex][aggIndex2] =
doubleThetaDerivatives[aggIndex][aggIndex2][indIndex].subtract(values_.df_dX[aggIndex].restrict(values_.aggVarSelector).getValues().lastDimSlice(indIndex).multiply(aggDoubleThetaDerivs[aggIndex][aggIndex2])).multiply(1d/(values_.dX_dTheta[aggIndex]*values_.dX_dTheta[aggIndex2]));
indByAggDoubleDerivs_of[indIndex][aggIndex2][aggIndex] = indByAggDoubleDerivs_of[indIndex][aggIndex][aggIndex2] = doubleThetaDerivatives_of[aggIndex][aggIndex2][indIndex].subtract(values_.df_dX_of[aggIndex].multiply(aggDoubleThetaDerivs[aggIndex][aggIndex2])).multiply(1d/(values_.dX_dTheta[aggIndex]*values_.dX_dTheta[aggIndex2]));
}
}
}
/* Now we are in a position to calculate the aggregate double derivatives
*/
for(int futureAgg = 0; futureAgg < _nAggStates; futureAgg++)
{
for (int aggIndex = 0; aggIndex < _nAggStates; aggIndex++)
{
for (int aggIndex2 = aggIndex; aggIndex2 < _nAggStates; aggIndex2++)
{
DoubleArray> totalImpact = createArrayOfSize(values_._currentDistribution._density.size());
DoubleArray> totalImpact_of = createArrayOfSize(values_._currentDistribution._overflowProportions.size());
for(int futureInd = 0; futureInd < _nIndStates; futureInd++)
{
totalImpact.modifying().add(doubleAggDerivs[aggIndex][aggIndex2].restrict(values_.aggVarSelector).getValues().lastDimSlice(futureInd).copy());
totalImpact_of.modifying().add(doubleAggDerivs_of[aggIndex][aggIndex2].copy());
for(int currentInd = 0; currentInd < _nIndStates; currentInd++)
{
totalImpact.modifying().add(doubleIndAggDerivs[currentInd][aggIndex2].getValues().lastDimSlice(futureInd).multiply(values_.dx_dX[aggIndex][currentInd]));
totalImpact_of.modifying().add(doubleIndAggDerivs_of[currentInd][aggIndex2].multiply(values_.dx_dX_of[aggIndex][currentInd]));
totalImpact.modifying().add(values_.df_dx[currentInd].getValues().lastDimSlice(futureInd).multiply(indByAggDoubleDerivs[currentInd][aggIndex][aggIndex2]));
totalImpact_of.modifying().add(values_.df_dx_of[currentInd].multiply(indByAggDoubleDerivs_of[currentInd][aggIndex][aggIndex2]));
}
totalImpact.modifying().multiply(values_.dF_dx[futureAgg][futureInd]);
totalImpact_of.modifying().multiply(values_.dF_dx_of[futureAgg][futureInd]);
}
values_.d2Xp_dX2.set(totalImpact.multiply(values_._currentDistribution._density).sum() + totalImpact_of.multiply(values_._currentDistribution._overflowProportions).sum(),futureAgg,aggIndex,aggIndex2);
if(aggIndex != aggIndex2)
{
values_.d2Xp_dX2.set(values_.d2Xp_dX2.get(futureAgg,aggIndex,aggIndex2),futureAgg,aggIndex2,aggIndex);
}
}
}
}
/* TODO: The above does not work, but for the single-state scenario this calculation below is sufficient and does the trick
*/
// double altDoubleDeriv = values.aggregationDerivatives[0][0].multiply(
// doubleIndDerivs[0][0].getValues().lastDimSlice(0).multiply(values.values.dxdX[0][0]).multiply(values.values.dxdX[0][0]).
// add(doubleIndAggDerivs[0][0].getValues().lastDimSlice(0).multiply(values.dxdX[0][0]).multiply(2)).
// add(indDerivs[0].getValues().lastDimSlice(0).multiply(doubleThetaDerivatives[0][0][0]).divide(values.aggThetaDerivs[0]).divide(values.aggThetaDerivs[0])).
// add(doubleAggDerivs[0][0].getValues().lastDimSlice(0))
// ).multiply(simState._density).sum();
//
// altDoubleDeriv += values.aggregationDerivatives_of[0][0].multiply(
// doubleIndDerivs_of[0][0].multiply(values.dxdX_of[0][0]).multiply(values.dxdX_of[0][0]).
// add(doubleIndAggDerivs_of[0][0].multiply(values.dxdX_of[0][0]).multiply(2)).
// add(indDerivs_of[0].multiply(doubleThetaDerivatives_of[0][0][0]).divide(values.aggThetaDerivs[0]).divide(values.aggThetaDerivs[0])).
// add(doubleAggDerivs_of[0][0])
// ).multiply(simState._overflowProportions).sum();
// aggDoubleDerivatives.set(altDoubleDeriv,0,0,0);
}
void calc_dXp_dX(DAValues values)
{
for (int currentAggIndex = 0; currentAggIndex < _nAggStates; currentAggIndex++)
{
for (int futureAggIndex = 0; futureAggIndex < _nAggStates; futureAggIndex++)
{
DoubleArray> totalFutureIndividualImpact = createArrayOfSize(values._currentDistribution._density.size());
DoubleArray> totalFutureIndividualImpact_of = createArrayOfSize(values._currentDistribution._overflowAverages.size());
for (int futureIndIndex = 0; futureIndIndex < _nIndStates; futureIndIndex++)
{
DoubleArray> totalFutureIndividualStateChange = createArrayOfSize(values._currentDistribution._density.size());
DoubleArray> totalFutureIndividualStateChange_of = createArrayOfSize(values._currentDistribution._overflowAverages.size());
if (values.future_dF_dx[futureAggIndex][futureIndIndex] != null)
{
for (int currentIndIndex = 0; currentIndIndex < _nIndStates; currentIndIndex++)
{
// Get the derivative just of the future variable
// with respect to the current one
DoubleArray> indDeriv = values.df_dx[currentIndIndex].getValues().lastDimSlice(futureIndIndex);
// And the change in the current variable with respect to current aggregate
DoubleArray> indByAgg = values.dx_dX[currentAggIndex][currentIndIndex];
// Add the product of the two partial derivatives (chain rule)
totalFutureIndividualStateChange.modifying().add(indDeriv.multiply(indByAgg));
totalFutureIndividualStateChange_of.modifying().add(values.df_dx_of[currentIndIndex].multiply(values.dx_dX_of[currentAggIndex][currentIndIndex]));
//Also get the indirect impacts via current control changes
if(_nAggControls > 0)
{
for (int currentControlIndex = 0; currentControlIndex < _nAggControls; currentControlIndex++)
{
// Skip controls which can't affect the states
if(values.df_dC[currentControlIndex] == null) continue;
// Add the product of the two partial derivatives (chain rule)
totalFutureIndividualStateChange.modifying().add(
values.df_dC[currentControlIndex].getValues().lastDimSlice(currentIndIndex).multiply(
values.dC_dX.get(currentControlIndex,currentAggIndex)));
totalFutureIndividualStateChange_of.modifying().add(
values.df_dC_of[currentControlIndex].multiply(
values.dC_dX.get(currentControlIndex,currentAggIndex)));
}
}
}
// Add the component due to the change in the aggregate
// itself
if(!_secondOrder)
{
totalFutureIndividualStateChange.modifying().add(values.df_dX[currentAggIndex].getValues().lastDimSlice(futureIndIndex));
totalFutureIndividualStateChange_of.modifying().add(values.df_dX_of[currentAggIndex]/* TODO: Why no lastDimSlice here?*/);
}
else
{
DoubleArray> vals = values.df_dX[currentAggIndex].restrict(values.aggVarSelector).getValues();
totalFutureIndividualStateChange.modifying().add(vals.lastDimSlice(futureIndIndex));
totalFutureIndividualStateChange_of.modifying().add(values.df_dX_of[currentAggIndex]);
}
totalFutureIndividualImpact.modifying().add(
totalFutureIndividualStateChange.multiply(values.future_dF_dx[futureAggIndex][futureIndIndex]));
totalFutureIndividualImpact_of.modifying().add(
totalFutureIndividualStateChange_of.multiply(values.future_dF_dx_of[futureAggIndex][futureIndIndex]));
}
}
double indirectImpactFromOtherAggStates = 0d;
if(_nAggStates > 1) {
// These need to be summed over the future distribution, since they are on
// grid values in the future
DoubleArray> totalIndirectImpact = createArrayOfSize(values._currentDistribution._density.size());
DoubleArray> totalIndirectImpact_of = createArrayOfSize(values._currentDistribution._overflowAverages.size());
for (int otherFutureAggIndex = 0; otherFutureAggIndex < futureAggIndex; otherFutureAggIndex++)
{
totalIndirectImpact.add(
values.future_dF_dX[futureAggIndex][otherFutureAggIndex].multiply(values.dXp_dX.get(otherFutureAggIndex,
currentAggIndex)));
totalIndirectImpact_of.add(
values.future_dF_dX_of[futureAggIndex][otherFutureAggIndex].multiply(values.dXp_dX.get(otherFutureAggIndex,
currentAggIndex)));
}
indirectImpactFromOtherAggStates = totalIndirectImpact.multiply(values._futureDistribution._density).sum()+
totalIndirectImpact_of.multiply(values._futureDistribution._overflowProportions).sum();
}
values.dXp_dX.set(
totalFutureIndividualImpact.multiply(values._currentDistribution._density).sum()
+ totalFutureIndividualImpact_of.multiply(values._currentDistribution._overflowProportions).sum() +
indirectImpactFromOtherAggStates,
futureAggIndex, currentAggIndex);
}
}
}
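/*
* The loops above implement, for each pair (j = futureAggIndex, i = currentAggIndex), a
* first-order chain rule of roughly the following shape; this is a reading of the code
* above, not the notation of any particular paper:
*
*   dX'_j/dX_i = Int over current distribution of
*                  Sum_k dF_j/dx'_k * [ Sum_l df_k/dx_l * dx_l/dX_i
*                                       + Sum_c df_k/dC_c * dC_c/dX_i
*                                       + df_k/dX_i ]
*                + Sum_{m < j} ( Int over future distribution of dF_j/dX'_m ) * dX'_m/dX_i
*
* where f is the individual policy, F the aggregation, x individual states, X aggregate
* states and C aggregate controls; each integral includes the corresponding overflow mass.
*/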
void calc_future_dF(DAValues values)
{
for (int aggIndex = 0; aggIndex < _nAggStates; aggIndex++)
{
for (int indIndex = 0; indIndex < _nIndStates; indIndex++)
{
/*
* This gets the impact of individual states on aggregate states
*/
values.future_dF_dx[aggIndex][indIndex] =
deriveAggregationByIndividualState(aggIndex, indIndex, values._currentShocks,
values._futureAggs, false);
// Interpolate to get the overflow values
values.future_dF_dx_of[aggIndex][indIndex] = overflowValues(values.future_dF_dx[aggIndex][indIndex], 0, values._futureDistribution);
}
/*
* This gets the impact of each aggregate on the values of other
* aggregates
*
* NOTE: Only aggregates with lower indexes can affect a particular
* aggregate, to avoid circularity
*/
for (int otherAgg = 0; otherAgg < aggIndex; otherAgg++)
{
values.future_dF_dX[aggIndex][otherAgg] =
deriveAggregationByAggregateState(aggIndex, otherAgg, values._currentShocks,
values._futureAggs);
// Interpolate to get the overflow values
values.future_dF_dX_of[aggIndex][otherAgg] = overflowValues(values.future_dF_dX[aggIndex][otherAgg], 0, values._futureDistribution);
}
}
}
void calc_dC_dX(DAValues values) throws ModelException
{
/* Calculate the impact of aggregate state changes on the implied values of the control determinants
*/
DoubleArray> impliedDeterminantByAggStateDerivatives = createArrayOfSize(
_nAggControls, _nAggStates);
DoubleArray> impliedDeterminantByAggControlDerivatives = createArrayOfSize(
_nAggControls);
for (int currentControlIndex = 0; currentControlIndex < _nAggControls; currentControlIndex++)
{
/* Note this sign is opposite from the paper, but reversed below...
*/
// Subtract from the control impact the gradient of the target at the current control value
if(_config.getAggregateControls().get(currentControlIndex).numberOfElements() == 1)
{
impliedDeterminantByAggControlDerivatives.set(1, currentControlIndex);
}
else
{
DoubleArray> totalIndividualImpactfromControl = createArrayOfSize(values._currentDistribution._density.size());
DoubleArray> totalIndividualImpactfromControl_of = createArrayOfSize(values._currentDistribution._overflowAverages.size());
for (int currentIndControlIndex = 0; currentIndControlIndex < values.nIndControls; currentIndControlIndex++)
{
totalIndividualImpactfromControl.modifying().add(values.dc_dC[currentIndControlIndex][currentControlIndex].multiply(values.controlAggregationDerivatives[currentControlIndex][currentIndControlIndex]));
totalIndividualImpactfromControl_of.modifying().add(values.dc_dC_of[currentIndControlIndex][currentControlIndex].multiply(values.controlAggregationDerivatives_of[currentControlIndex][currentIndControlIndex]));
}
impliedDeterminantByAggControlDerivatives.set(totalIndividualImpactfromControl.multiply(values._currentDistribution._density).sum() + totalIndividualImpactfromControl_of.multiply(values._currentDistribution._overflowProportions).sum(), currentControlIndex);
DoubleArray> controlTargets = _config.getControlTargets().get(currentControlIndex);
final double currentControlVal = values._currentControls[currentControlIndex];
impliedDeterminantByAggControlDerivatives.at(currentControlIndex).modifying().subtract(
_functionFactory.createFunction(Arrays.asList(_config.getAggregateControls().get(currentControlIndex)), controlTargets).partialDerivative(0, new double[]{currentControlVal}).getValues().get(0));
}
for (int currentStateIndex = 0; currentStateIndex < _nAggStates; currentStateIndex++)
{
DoubleArray> totalIndividualImpact = createArrayOfSize(values._currentDistribution._density.size());
DoubleArray> totalIndividualImpact_of = createArrayOfSize(values._currentDistribution._overflowAverages.size());
for (int currentIndControlIndex = 0; currentIndControlIndex < values.nIndControls; currentIndControlIndex++)
{
DoubleArray> totalIndividualControlChange = createArrayOfSize(values._currentDistribution._density.size());
DoubleArray> totalIndividualControlChange_of = createArrayOfSize(values._currentDistribution._overflowAverages.size());
for (int currentIndStateIndex = 0; currentIndStateIndex < _nIndStates; currentIndStateIndex++)
{
// Get the derivative just of the control with respect to the ind. state
DoubleArray> indDeriv = values.dc_dx[currentIndControlIndex][currentIndStateIndex];
DoubleArray> indDeriv_of = values.dc_dx_of[currentIndControlIndex][currentIndStateIndex];
// And the change in the current state variable with respect to current aggregate
DoubleArray> indByAgg = values.dx_dX[currentStateIndex][currentIndStateIndex];
DoubleArray> indByAgg_of = values.dx_dX_of[currentStateIndex][currentIndStateIndex];
// Add the product of the two partial derivatives (chain rule)
totalIndividualControlChange.modifying().add(indDeriv.multiply(indByAgg));
totalIndividualControlChange_of.modifying().add(indDeriv_of.multiply(indByAgg_of));
}
// Add the component due to the change in the aggregate state itself
totalIndividualControlChange.modifying().add(values.dc_dX[currentIndControlIndex][currentStateIndex]);
totalIndividualControlChange_of.modifying().add(values.dc_dX_of[currentIndControlIndex][currentStateIndex]);
totalIndividualImpact.modifying().add(
totalIndividualControlChange.multiply(values.controlAggregationDerivatives[currentControlIndex][currentIndControlIndex]));
totalIndividualImpact_of.modifying().add(
totalIndividualControlChange_of.multiply(values.controlAggregationDerivatives_of[currentControlIndex][currentIndControlIndex]));
}
// Add the direct impact of the aggregate state on the aggregation
totalIndividualImpact.modifying().add(values.controlAggregationByAggDerivatives[currentControlIndex][currentStateIndex]);
totalIndividualImpact_of.modifying().add(values.controlAggregationByAggDerivatives_of[currentControlIndex][currentStateIndex]);
impliedDeterminantByAggStateDerivatives.set(totalIndividualImpact.multiply(values._currentDistribution._density).sum()
+ totalIndividualImpact_of.multiply(values._currentDistribution._overflowProportions).sum(), currentControlIndex, currentStateIndex);
}
}
/* SIGN REVERSED HERE BY SUBTRACTION
*/
// Calculate the change in the equilibrium control value from the changes in implied control values by state and control
values.dC_dX = impliedDeterminantByAggStateDerivatives.multiply(-1).across(0).
divide(impliedDeterminantByAggControlDerivatives);
}
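/*
* In effect this is an implicit-function step: writing G(C, X) for the aggregated control
* determinant net of its target (G is shorthand for this comment only, not an identifier
* from the code), the equilibrium control satisfies G(C(X), X) = 0, so
*
*   dC/dX = - (dG/dX) / (dG/dC)
*
* which is what the final division implements, with the sign carried by the multiply(-1).
*/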
/**
* @param values The values structure holding the derivatives collected so far
*/
void calc_dc(DAValues values)
{
// No agg controls means no ind controls - nothing to do!
if(_nAggControls == 0) {
return;
}
/* Derive the individual controls policies by aggregate states
*/
deriveIndControls(values, values.firstAggState, _nAggStates, values.dc_dX, values.dc_dX_of);
/* Derive the individual controls policies by aggregate controls
*/
deriveIndControls(values, values.firstAggState + _nAggStates, _nAggControls, values.dc_dC,
values.dc_dC_of);
/* Derive the individual controls policies by individual states
*/
deriveIndControls(values, 0, _nIndStates, values.dc_dx, values.dc_dx_of);
}
protected void deriveIndControls(DAValues values, int baseDimension, int numDimensions, DoubleArray>[][] derivs, DoubleArray>[][] derivs_of)
{
for (int aggIndex = 0; aggIndex < numDimensions; aggIndex++)
{
// ... and is evaluated at the current aggregate
int deriveDimension = baseDimension + aggIndex;
if(values.indControlsPolicy.getDomain().getAxis(deriveDimension).numberOfElements() == 1)
{
for(int i = 0; i < values.nIndControls; i++)
{
derivs[i][aggIndex] = null;
derivs_of[i][aggIndex]= null;
}
continue;
}
DoubleArray> deriv = values.indControlsPolicy.partialDerivative(
deriveDimension, values.aggVarSelector).getValues();
if(values.nIndControls == 1)
{
derivs[0][aggIndex] = deriv;
derivs_of[0][aggIndex] = overflowValues(deriv,0,values._currentDistribution);
}
else
{
for(int i = 0; i < values.nIndControls; i++)
{
derivs[i][aggIndex] = deriv.lastDimSlice(i);
// Interpolate to get the overflow values
derivs_of[i][aggIndex] = overflowValues(derivs[i][aggIndex],0,
values._currentDistribution);
}
}
}
}
void calc_df(DAValues values_)
{
for (int aggIndex = 0; aggIndex < _nAggStates; aggIndex++)
{
// If we only need a first order approximation...
if(!_secondOrder)
{
// ... it can be evaluated at the current aggregate
values_.df_dX[aggIndex] = values_.indStatesPolicy.partialDerivative(
values_.firstAggState + aggIndex, values_.aggVarSelector);
// Interpolate to get the overflow values
values_.df_dX_of[aggIndex] = interpolateFunction(_simulationStates,
values_.df_dX[aggIndex].getValues(), 0,
values_._currentDistribution._overflowAverages);
}
else // otherwise,...
{
// ... we need to evaluate it everywhere to later calculate the second derivative
values_.df_dX[aggIndex] = values_.indStatesPolicy.partialDerivative(
values_.firstAggState + aggIndex);
// Interpolate to get the overflow values - Here we can restrict!
values_.df_dX_of[aggIndex] = interpolateFunction(_simulationStates,
values_.df_dX[aggIndex].restrict(values_.aggVarSelector).getValues(), 0,
values_._currentDistribution._overflowAverages);
}
}
/* Derive individual state policies by aggregate controls
*/
for (int controlIndex = 0; controlIndex < _nAggControls; controlIndex++)
{
if(_config.getAggregateControls().get(controlIndex).numberOfElements() == 1)
{
// A control grid with only a single element has no variation to derive by...
values_.df_dC[controlIndex] = null;
// ... and consequently there are no overflow values either
values_.df_dC_of[controlIndex] = null;
}
else
{
// ... and is evaluated at the current aggregate
values_.df_dC[controlIndex] = values_.indStatesPolicy.
partialDerivative(values_.firstAggState +
_nAggStates + controlIndex, values_.aggVarSelector);
// Interpolate to get the overflow values
values_.df_dC_of[controlIndex] = overflowValues(values_.df_dC[controlIndex], 0,
values_._currentDistribution);
}
}
// Restrict the policy to the aggregate variables
DoubleGridFunction restricted = values_.indStatesPolicy.restrict(values_.aggVarSelector);
/*
* Calculate the derivatives of the individual policy function w.r.t.
* the individual states
*/
for (int indIndex = 0; indIndex < _nIndStates; indIndex++)
{
values_.df_dx[indIndex] = restricted.partialDerivative(indIndex);
// Interpolate to get the overflow values
values_.df_dx_of[indIndex] = overflowValues(values_.df_dx[indIndex], indIndex,
values_._currentDistribution);
}
}
/**
* @param values_ The value object under construction
*/
protected void collect_dD_dc(DAValues values_)
{
for (int aggControl = 0; aggControl < _nAggControls; aggControl++)
{
for (int indControl = 0; indControl < values_.nIndControls; indControl++)
{
/*
* This gets the impact of individual controls on aggregate controls
*/
values_.controlAggregationDerivatives[aggControl][indControl] = deriveDeterminantAggregationByIndividualControl(aggControl, indControl, values_._currentShocks,values_._currentAggs, values_.indControlsPolicy.restrict(values_.aggVarSelector).getValues());
// Interpolate to get the overflow values
values_.controlAggregationDerivatives_of[aggControl][indControl] = interpolateFunction(_simulationStates, values_.controlAggregationDerivatives[aggControl][indControl], 0, values_._currentDistribution._overflowAverages);
}
/*
* This gets the impact of each aggregate state on the values of aggregate controls
*/
for (int aggState = 0; aggState < _nAggStates; aggState++)
{
values_.controlAggregationByAggDerivatives[aggControl][aggState] = deriveDeterminantAggregationByAggregateState(aggControl, aggState, values_._currentShocks,
values_._currentAggs);
// Interpolate to get the overflow values
values_.controlAggregationByAggDerivatives_of[aggControl][aggState] = interpolateFunction(_simulationStates, values_.controlAggregationByAggDerivatives[aggControl][aggState], 0, values_._currentDistribution._overflowAverages);
}
}
}
/**
* @param values_ The values structure, already holding dx/dTheta and dX/dTheta, to be filled with dx/dX
*/
void calc_dx_dX(DAValues values_)
{
for (int aggIndex = 0; aggIndex < _nAggStates; aggIndex++)
{
/* Then just divide through the dx/dtheta by dX/dtheta to get dx/dX
*/
for (int indIndex = 0; indIndex < _nIndStates; indIndex++)
{
// The rate of change of x w.r.t X as theta changes is the
// quotient of the two derivatives w.r.t. theta
values_.dx_dX[aggIndex][indIndex] =
values_.dx_dTheta[aggIndex][indIndex].divide(values_.dX_dTheta[aggIndex]);
values_.dx_dX_of[aggIndex][indIndex] =
values_.dx_dTheta_of[aggIndex][indIndex].divide(values_.dX_dTheta[aggIndex]);
}
}
}
/**
* @param values_ The value object under construction
*/
@SuppressWarnings("unchecked")
protected void calc_dX_dTheta(DAValues values_)
{
for (int aggIndex = 0; aggIndex < _nAggStates; aggIndex++)
{
/* First calculate the impact of theta_k on X_k
*/
for (int indIndex = 0; indIndex < _nIndStates; indIndex++)
{
DoubleNaryOp tripleProd =
inputs_ -> inputs_[0]*inputs_[1]*inputs_[2];
// If either impact is null (i.e. zero across the board), there
// is nothing to do
if(values_.dF_dx[aggIndex][indIndex] != null &&
values_.dx_dTheta[aggIndex][indIndex] != null)
{
// Add the part from the main distribution
values_.dX_dTheta[aggIndex] +=
values_.dF_dx[aggIndex][indIndex].with(
values_.dx_dTheta[aggIndex][indIndex],
values_._currentDistribution._density).
map(tripleProd).sum();
// And the part from the overflow
values_.dX_dTheta[aggIndex] +=
values_.dF_dx_of[aggIndex][indIndex].with(
values_.dx_dTheta_of[aggIndex][indIndex],
values_._currentDistribution._overflowProportions).
map(tripleProd).sum();
}
}
}
}
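/*
* The accumulation above computes, for each aggregate k,
*
*   dX_k/dTheta_k = Sum_i Int ( dF_k/dx_i * dx_i/dTheta_k ) over the current distribution
*                   (density part plus the overflow part),
*
* i.e. the aggregate response to the transformation parameter is the aggregation
* derivative times the individual response, weighted by where the agents actually sit.
*/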
/**
* @param values_ The value object under construction
*/
protected void collect_dF_dx(DAValues values_)
{
for (int aggIndex = 0; aggIndex < _nAggStates; aggIndex++)
{
for (int indIndex = 0; indIndex < _nIndStates; indIndex++)
{
/*
* This gets the impact of individual states on aggregate states
*/
values_.dF_dx[aggIndex][indIndex] =
deriveAggregationByIndividualState(
aggIndex,
indIndex,
values_._currentShocks,
values_._currentAggs, true);
// Interpolate to get the overflow values
values_.dF_dx_of[aggIndex][indIndex] =
interpolateFunction(_simulationStates,
values_.dF_dx[aggIndex][indIndex],
0,
values_._currentDistribution._overflowAverages);
}
}
}
/**
* @param values_ The value object under construction
*/
protected void collect_dx_dTheta(DAValues values_)
{
for (int aggIndex = 0; aggIndex < _nAggStates; aggIndex++)
{
for (int indIndex = 0; indIndex < _nIndStates; indIndex++)
{
/*
* This gets the impact of each transformation on individual
* variables
*/
values_.dx_dTheta[aggIndex][indIndex] =
deriveIndividualTransformationByTheta(
aggIndex,
indIndex,
values_._currentShocks,
values_._currentAggs);
// Interpolate to get the overflow values
values_.dx_dTheta_of[aggIndex][indIndex] =
interpolateFunction(_simulationStates,
values_.dx_dTheta[aggIndex][indIndex],
0,
values_._currentDistribution._overflowAverages);
}
}
}
private DoubleArray> overflowValues(
DoubleGridFunction function_,
int valueIndex_,
DiscretisedDistribution distribution_)
{
return overflowValues(function_.getValues(), valueIndex_, distribution_);
}
private DoubleArray> overflowValues(
DoubleArray> function_,
int valueIndex_,
DiscretisedDistribution distribution_)
{
assert function_.numberOfDimensions() == 2 || function_.numberOfDimensions() == 3 &&
function_.size()[2] == 1;
return interpolateFunction(_simulationStates,
function_,
0,
distribution_._overflowAverages);
}
private void fillTransitionSingleLog(
DoubleArray> newTransition,
final double[] aggregateStates,
double[] futureAggregates,
DoubleArray> aggregateDerivatives)
{
// Fill the transition with the Y values
DoubleArray> futureAggsArray = DoubleArrayFactories.createArray(futureAggregates);
// Fill the last dimension with the future values (no log is taken of these here)
newTransition.fillDimensions(futureAggsArray, newTransition.numberOfDimensions() - 1);
// The gradients need to be adjusted to be gradients of logs
aggregateDerivatives.modifying().across(1).multiply(DoubleArrayFactories.createArray(aggregateStates)).across(0);
// Now add the part resulting from variation in the value of each
// current aggregate
for (int currentAgg = 0; currentAgg < aggregateStates.length; currentAgg++)
{
DoubleArray> aggContribution = createArrayOfSize(newTransition.size());
// Add the variation in the log of the current aggregate from the point where the approx is taken
aggContribution.fillDimensions(_config.getAggregateEndogenousStates().get(currentAgg).map(log).subtract(Math.log(aggregateStates[currentAgg])),
currentAgg);
// Now multiply by the gradients
aggContribution.modifying().across(aggContribution.numberOfDimensions() - 1).multiply(aggregateDerivatives.at(-1, currentAgg));
// Add the (log) contribution
newTransition.modifying().add(aggContribution);
}
}
private void fillTransitionLogs(
DoubleArray> newTransition,
final double[] aggregateStates,
double[] futureAggregates,
DoubleArray> aggregateDerivatives,
DoubleArray> aggregateDoubleDerivatives,
int shock_)
{
// Fill the transition with the Y values
DoubleArray> futureAggsArray = DoubleArrayFactories.createArray(futureAggregates);
// Add the log of the future value
newTransition.fillDimensions(futureAggsArray.map(log), newTransition.numberOfDimensions() - 1);
// The gradients need to be adjusted to be gradients of logs
DoubleArray> currentAggsArray = DoubleArrayFactories.createArray(aggregateStates);
aggregateDerivatives.modifying().across(1).multiply(currentAggsArray).across(0).divide(futureAggsArray);
if(_secondOrder)
{
DoubleArray> auxiliary = createArrayOfSize(aggregateDoubleDerivatives.size());
auxiliary.modifying().across(0,1).add(aggregateDerivatives).across(0,2).multiply(aggregateDerivatives);
// For double derivatives with respect to the same index an additional correction is needed
for(int currentAgg = 0; currentAgg < aggregateStates.length; currentAgg++)
{
auxiliary.at(-1,currentAgg,currentAgg).modifying().subtract(aggregateDerivatives.at(-1,currentAgg));
}
aggregateDoubleDerivatives.modifying().across(1).multiply(currentAggsArray).across(2).multiply(currentAggsArray).across(0).divide(futureAggsArray).subtract(auxiliary);
aggregateDoubleDerivatives.modifying().divide(2d);
}
// Now add the part resulting from variation in the value of each
// current aggregate
for (int currentAgg = 0; currentAgg < futureAggregates.length; currentAgg++)
{
DoubleArray> aggContribution = createArrayOfSize(newTransition.size());
// Add the variation in the log of the current aggregate from the point where the approx is taken
aggContribution.fillDimensions(_config.getAggregateEndogenousStates().get(currentAgg).map(log).subtract(Math.log(aggregateStates[currentAgg])),
currentAgg);
// Now multiply by the gradients
aggContribution.modifying().across(aggContribution.numberOfDimensions() - 1).multiply(aggregateDerivatives.at(-1, currentAgg));
// Add the (log) contribution
newTransition.modifying().add(aggContribution);
if(_secondOrder)
{
// And finally, we also need the part for the second derivatives, if requested
for (int currentAgg2 = 0; currentAgg2 < futureAggregates.length; currentAgg2++)
{
DoubleArray> doubleAggContribution = createArrayOfSize(newTransition.size());
doubleAggContribution.fillDimensions(_config.getAggregateEndogenousStates().get(currentAgg).map(log).subtract(Math.log(aggregateStates[currentAgg])),
currentAgg);
doubleAggContribution.modifying().across(currentAgg2).multiply(_config.getAggregateEndogenousStates().get(currentAgg2).map(log).subtract(Math.log(aggregateStates[currentAgg2])));
doubleAggContribution.modifying().across(doubleAggContribution.numberOfDimensions() - 1).multiply(aggregateDoubleDerivatives.at(-1, currentAgg, currentAgg2));
// Add the (log) contribution
newTransition.modifying().add(doubleAggContribution);
}
}
}
newTransition.modifying().map((DoubleUnaryOp)Math::exp);
}
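/*
* In log space the gradients are first rescaled to elasticities,
*
*   d log X'_j / d log X_i = (X_i / X'_j) * dX'_j/dX_i
*
* (the multiply by the current aggregates and divide by the future aggregates above), and
* the grid is then filled with
*
*   log X'_j(X) = log X'_j* + Sum_i e_ji * (log X_i - log X_i*)  [+ second-order terms]
*
* before exponentiating back to levels; X* denotes the point at which the approximation
* was taken and e_ji the elasticity.
*/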
private void fillTransition(DoubleArray> newTransition, final double[] aggregateStates, double[] futureAggregates,
DoubleArray> aggregateDerivatives,DoubleArray> aggregateDoubleDerivatives)
{
// Fill the transition with the Y values
newTransition.fillDimensions(futureAggregates, newTransition.numberOfDimensions() - 1);
// Now add the part resulting from variation in the value of each
// current aggregate
for (int currentAgg = 0; currentAgg < futureAggregates.length; currentAgg++)
{
DoubleArray> aggContribution = createArrayOfSize(newTransition.size());
// Add the variation in the current aggregate from its value at the point where
// the approximation is taken
aggContribution.fillDimensions(_config.getAggregateEndogenousStates().get(currentAgg).subtract(aggregateStates[currentAgg]),
currentAgg);
// Now multiply by the gradients
aggContribution.modifying().across(aggContribution.numberOfDimensions() - 1).multiply(aggregateDerivatives.at(-1, currentAgg));
newTransition.modifying().add(aggContribution);
if(_secondOrder)
{
// And finally, we also need the part for the second derivatives, if requested
for (int currentAgg2 = 0; currentAgg2 < futureAggregates.length; currentAgg2++)
{
DoubleArray> doubleAggContribution = createArrayOfSize(newTransition.size());
doubleAggContribution.fillDimensions(_config.getAggregateEndogenousStates().get(currentAgg).subtract(aggregateStates[currentAgg]),
currentAgg);
doubleAggContribution.modifying().across(currentAgg2).multiply(_config.getAggregateEndogenousStates().get(currentAgg2).subtract(aggregateStates[currentAgg2]));
doubleAggContribution.modifying().across(doubleAggContribution.numberOfDimensions() - 1).multiply(aggregateDoubleDerivatives.at(-1, currentAgg, currentAgg2));
// Add the contribution, halving the quadratic term
newTransition.modifying().add(doubleAggContribution.divide(2d));
}
}
}
}
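/*
* The level-space counterpart fills the grid with a Taylor expansion around the point X*
* at which the derivatives were taken:
*
*   X'_j(X) = X'_j* + Sum_i dX'_j/dX_i * (X_i - X_i*)
*             + (1/2) * Sum_{i,k} d2X'_j/(dX_i dX_k) * (X_i - X_i*) * (X_k - X_k*)
*
* with the quadratic term only present when secondOrder(true) was requested.
*/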
protected DoubleArray> deriveDeterminantAggregationByIndividualControl(int aggIndex_, int indIndex_, int[] aggExoIndex_,
double[] currentAggregates_, DoubleArray> controlPolicyAtAggs_){
// The default assumes there are no controls and hence does nothing
return null;
};
protected DoubleArray> deriveDeterminantAggregationByAggregateState(int aggIndex_, int aggState_, int[] currentShockIndexes_,
double[] aggregateStates_)
{
// The default assumes there are no controls and hence does nothing
return null;
}
protected DoubleArray> doubleDeriveAggregationByIndividualStates(int aggretationIndex_, int indIndex_, int indIndex2_, int[] aggExoIndex_,
double[] currentAggregates_)
{
return null;
}
protected DoubleArray> doubleDeriveAggregationByIndividualAndAggregateState(int aggretationIndex_, int indIndex_, int aggIndex_, int[] aggExoIndex_,
double[] currentAggregates_)
{
return null;
}
protected DoubleArray> doubleDeriveAggregationByAggregateStates(int aggretationIndex_, int aggIndex_, int aggIndex2_, int[] aggExoIndex_,
double[] currentAggregates_)
{
return null;
}
protected DoubleArray> doubleDeriveIndividualTransformationByTheta( int transformationIndex_, int transformation2Index_, int individualIndex_, int[] aggExoIndex_,
double[] currentAggregates_)
{
return null;
}
/**
* Returns an array of derivatives of the aggregation of aggregate {@code aggIndex_} by the requested individual state {@code indIndex_}. The returned array should be the size of
* the simulation grid and may hold a different value at each point on that grid.
*
* @param aggIndex_ The index of the aggregate variable for which the aggregation should be derived
* @param indIndex_ The individual variable to derive by
* @param aggExoIndex_ The aggregate exogenous state for which to perform the calculation
* @param currentAggregates_ The current values of aggregate endogenous states
* @param current_ Indicates whether this is a current period aggregation or a next period one.
*
* @return A simulation-grid-sized array with the derived values
*/
abstract protected DoubleArray> deriveAggregationByIndividualState(
int aggIndex_, int indIndex_, int[] aggExoIndex_,
double[] currentAggregates_, boolean current_);
/**
* Returns an array of derivatives of the aggregation of aggregate {@code aggIndex_} by the other aggregate {@code stateIndex_}, whose index will be less than
* the former's. The returned array should be the size of the simulation grid and may hold a different value at each point on that grid.
*
* @param aggIndex_ The index of the aggregate variable for which the aggregation should be derived
* @param stateIndex_ The aggregate variable to derive by
* @param aggExoIndex_ The aggregate exogenous state for which to perform the calculation
* @param currentAggregates_ The current values of aggregate endogenous states
*
* @return A simulation-grid-sized array with the derived values
*/
abstract protected DoubleArray> deriveAggregationByAggregateState(
int aggIndex_, int stateIndex_, int[] aggExoIndex_,
double[] currentAggregates_);
/**
* Returns an array of derivatives of the individual state {@code individualIndex_} under the transformation for aggregate {@code transformationIndex_}, derived by its parameter θ.
* The returned array should be the size of the simulation grid and may hold a different value at each point on that grid.
*
* @param transformationIndex_ The index of the aggregate variable whose transformation is to be derived
* @param individualIndex_ The individual state variable to be derived
* @param aggExoIndex_ The aggregate exogenous state index
* @param currentAggregates_ The current aggregate states
*
* @return A simulation-grid-sized array with the derived values
*/
abstract protected DoubleArray> deriveIndividualTransformationByTheta( int transformationIndex_, int individualIndex_, int[] aggExoIndex_,
double[] currentAggregates_);
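/*
* A minimal sketch of what a concrete subclass supplies; the generic parameters, return
* types and grid constructions below are placeholders since they depend on the model at
* hand. Only the three abstract methods above are required for first-order aggregation;
* the doubleDerive* hooks may additionally be overridden for second-order aggregation.
*
*   public final class MySolver extends DerivativeAggregationSolver<MyConfig, MyState, MyModel>
*   {
*       MySolver(MyModel model, MyConfig config, DiscretisedDistributionSimulator sim)
*       {
*           super(model, config, sim);
*       }
*
*       protected DoubleArray<?> deriveAggregationByIndividualState(int aggIndex, int indIndex,
*               int[] aggExoIndex, double[] currentAggs, boolean current)
*       {
*           // e.g. for aggregate capital aggregated as the mean of individual holdings,
*           // return a simulation-grid-sized array of ones
*           return ...;
*       }
*
*       protected DoubleArray<?> deriveAggregationByAggregateState(int aggIndex, int stateIndex,
*               int[] aggExoIndex, double[] currentAggs)
*       {
*           // zero grid if aggregate stateIndex does not enter the aggregation of aggIndex
*           return ...;
*       }
*
*       protected DoubleArray<?> deriveIndividualTransformationByTheta(int transformationIndex,
*               int individualIndex, int[] aggExoIndex, double[] currentAggs)
*       {
*           // response of the individual state to the transformation parameter theta
*           return ...;
*       }
*   }
*/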
public double getDerivError()
{
return _derivError;
}
}