const Int4 *const *scoreMatrix_,
const double *const *prob_,
// ...
if (dimension2_ == 0) dimension2_ = dimension_;
// ...
for (i = 0; i < dimension_; i++)
    for (j = 0; j < dimension2_; j++)
        sum += prob_[i][j];
// ...
const double FUDGE = 20.0;
// ...
for (i = 0; i < dimension_; i++)
    for (j = 0; j < dimension2_; j++)
        if (scoreMatrix_[i][j] < min)
            min = scoreMatrix_[i][j];
        else if (max < scoreMatrix_[i][j])
            max = scoreMatrix_[i][j];
// ...
size_t dim = static_cast<size_t>(max - min + 1);
double *p = new double[dim];
for (i = 0; i < dim; i++) p[i] = 0.0;
// ...
for (i = 0; i < dimension_; i++)
    for (j = 0; j < dimension2_; j++)
        p[scoreMatrix_[i][j] - min] += prob_[i][j];
// ...
for (s = min; s <= max; s++)
    if (0.0 < p[s - min]) ++*dim_;
// ...
*p_ = new double[*dim_];
*score_ = new Int4[*dim_];
// ...
for (s = min; s <= max; s++)
    if (0.0 < p[s - min]) {
        (*score_)[*dim_] = s;
        (*p_)[*dim_] = p[s - min];
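Read as a whole, this is the body of flatten (declared in the reference list below): it histograms every cell's probability by its score over the range [min, max], then keeps only the scores that actually carry probability. A minimal standalone sketch of the same idea, using std::vector and hypothetical names rather than the library's raw new[] arrays:

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

// Sketch only: collapse a score matrix and its matching cell probabilities
// into a 1-D distribution over the distinct scores with positive probability.
std::pair<std::vector<int32_t>, std::vector<double>>
flattenSketch(const std::vector<std::vector<int32_t>> &score,
              const std::vector<std::vector<double>> &prob)
{
    int32_t min = score[0][0], max = score[0][0];
    for (std::size_t i = 0; i < score.size(); i++)
        for (std::size_t j = 0; j < score[i].size(); j++) {
            if (score[i][j] < min) min = score[i][j];
            if (max < score[i][j]) max = score[i][j];
        }

    // Histogram the probabilities over the full score range [min, max].
    std::vector<double> p(static_cast<std::size_t>(max - min + 1), 0.0);
    for (std::size_t i = 0; i < score.size(); i++)
        for (std::size_t j = 0; j < score[i].size(); j++)
            p[score[i][j] - min] += prob[i][j];

    // Compact: keep only the scores that occur with positive probability.
    std::vector<int32_t> outScore;
    std::vector<double> outProb;
    for (int32_t s = min; s <= max; s++)
        if (0.0 < p[s - min]) {
            outScore.push_back(s);
            outProb.push_back(p[s - min]);
        }
    return {outScore, outProb};
}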
const Int4 *const *scoreMatrix_,
// ...
double **prob = MemUtil::newMatrix<double>(dimension_, dimension_);
// ...
for (i = 0; i < dimension_; i++)
    for (j = 0; j < dimension_; j++)
        prob[i][j] = q_[i] * q_[j];
// ...
flatten(dimension_, scoreMatrix_, prob, &dim, &score, &p);
// ...
MemUtil::deleteMatrix<double>(prob, dimension_, dimension_);
prob = 0;
// ...
delete[] score; score = 0;
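This is the matrix form of lambda (signature in the reference list below): it forms the joint letter probabilities q_[i] * q_[j], flattens them against the scoring matrix, and solves for the Karlin-Altschul decay parameter on the flattened distribution. For such a distribution, lambda is the unique positive root of sum_i p_i * exp(lambda * s_i) = 1. The sketch below illustrates that root-finding step with plain bisection; the names are hypothetical, and it is not the library's own solver, which the fragment does not show.

#include <cmath>
#include <cstddef>
#include <cstdint>
#include <vector>

// Sketch only: solve sum_i p[i] * exp(lambda * s[i]) = 1 for lambda > 0 by bisection.
// Assumes a proper distribution with negative mean score and at least one positive
// score, so that a positive root exists.
double lambdaSketch(const std::vector<int32_t> &s, const std::vector<double> &p)
{
    auto total = [&](double lambda) {
        double sum = 0.0;
        for (std::size_t i = 0; i < s.size(); i++)
            sum += p[i] * std::exp(lambda * static_cast<double>(s[i]));
        return sum;
    };

    double lo = 0.0, hi = 1.0;
    while (total(hi) < 1.0) hi *= 2.0;        // bracket the root from above
    for (int iter = 0; iter < 100; iter++) {  // bisect to convergence
        double mid = 0.5 * (lo + hi);
        (total(mid) < 1.0 ? lo : hi) = mid;
    }
    return 0.5 * (lo + hi);
}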
#ifdef NEED_ICC_OPTIMIZATION_LIMITS
# pragma optimization_level 1
#endif
// ...
sum += n_prob[i] * exp(x_ * static_cast<double>(n_score[i]));
const double FACTOR = 0.5;
// ...
for (size_t i = 0; i < dimension_; i++) {
    mu += static_cast<double>(score_[i]) * prob_[i];
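As the truncated loop shows, mu is simply the expected score of the flattened distribution, mu = sum_i score_[i] * prob_[i]. For example (hypothetical numbers), scores {-1, +1} with probabilities {0.75, 0.25} give mu = 0.75 * (-1) + 0.25 * (+1) = -0.5, the kind of negative mean that the isLogarithmic checks further down require.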
if (lambda_ == 0.0) lambda_ = lambda(dimension_, score_, prob_);
// ...
return muPowerAssoc(dimension_, score_, prob_, lambda_);
// ...
if (lambda_ == 0.0) lambda_ = lambda(dimension_, score_, prob_);
// ...
if (thetaMin_ == 0.0) thetaMin_ = thetaMin(dimension_, score_, prob_, lambda_);
#ifdef NEED_ICC_OPTIMIZATION_LIMITS
# pragma optimization_level 1
#endif
// ...
for (size_t i = 0; i < dimension_; i++) {
    sum += prob_[i] * exp(theta_ * static_cast<double>(score_[i]));
for (i = 0; i < dimension_; i++) {
    delta = Integer::euclidAlgorithm<Int4>(delta, score_[i]);
// ...
double del = static_cast<double>(delta(dimension_, score_));
return (1.0 - exp(-lambda_ * del)) / del;
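Here delta is the greatest common divisor of all score values, obtained by folding Integer::euclidAlgorithm over the array, and thetaMinusDelta then returns (1 - exp(-lambda * delta)) / delta. A standalone sketch of those two steps using std::gcd (hypothetical names):

#include <cmath>
#include <cstdint>
#include <numeric>
#include <vector>

// Sketch only: greatest common divisor of the score values (gcd(0, v) == |v|).
int32_t deltaSketch(const std::vector<int32_t> &score)
{
    int32_t d = 0;
    for (int32_t v : score) d = std::gcd(d, v);
    return d;
}

// Sketch only: (1 - exp(-lambda * delta)) / delta, mirroring the return statement above.
double thetaMinusDeltaSketch(double lambda, const std::vector<int32_t> &score)
{
    double del = static_cast<double>(deltaSketch(score));
    return (1.0 - std::exp(-lambda * del)) / del;
}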
return n_morgue < oldValue_ ? oldValue_ + n_score[state_] : oldValue_;
double *eOneMinusExpSumAlpha_,
// ...
double *eOneMinusExpSumAlphaW_,
// ...
double mu0 = 0.0 == mu0_ ? mu(dimension_, score_, prob_) : mu0_;
// ...
double lambda0 = 0.0 == lambda0_ ? lambda(dimension_, score_, prob_) : lambda0_;
// ...
if (lambda_ == 0.0) lambda_ = lambda0;
// ...
double muAssoc0 = 0.0 == muAssoc0_ ? muAssoc(dimension_, score_, prob_, lambda0) : muAssoc0_;
// ...
double thetaMin0 = 0.0 == thetaMin0_ ? thetaMin(dimension_, score_, prob_, lambda0) : thetaMin0_;
// ...
double rMin0 = 0.0 == rMin0_ ? rMin(dimension_, score_, prob_, lambda0, thetaMin0) : rMin0_;
_ASSERT(0.0 < rMin0 && rMin0 < 1.0);
// ...
const Int4 ITER = static_cast<Int4>(endW_) < ITER_MIN ? ITER_MIN : static_cast<Int4>(endW_);
// ...
Int4 entry = isStrict_ ? -1 : 0;
// ...
if (time_ > 0.0) Sls::alp_data::get_current_time(time0);
// ...
if (pAlphaW_) pAlphaW_[0] = 0.0;
if (eOneMinusExpSumAlphaW_) eOneMinusExpSumAlphaW_[0] = 0.0;
// ...
if (eSumAlpha_) *eSumAlpha_ = 0.0;
if (eOneMinusExpSumAlpha_) *eOneMinusExpSumAlpha_ = 0.0;
// ...
for (size_t w = 1; w < static_cast<size_t>(ITER); w++) {
    // ...
    if (pAlphaW_) pAlphaW_[w] = 0.0;
    if (eOneMinusExpSumAlphaW_) eOneMinusExpSumAlphaW_[w] = 0.0;
    // ...
    if (pAlphaW_) pAlphaW_[w] += dynProgProb.getProb(value);
    if (eOneMinusExpSumAlphaW_) eOneMinusExpSumAlphaW_[w] += dynProgProb.getProb(value) *
        (1.0 - exp(lambda_ * static_cast<double>(value)));
    // ...
    if (eSumAlpha_) *eSumAlpha_ += dynProgProb.getProb(value) * static_cast<double>(value);
    if (eOneMinusExpSumAlpha_) *eOneMinusExpSumAlpha_ += dynProgProb.getProb(value) *
        (1.0 - exp(lambda_ * static_cast<double>(value)));
    // ...
    Sls::alp_data::get_current_time(time1);
    if (time1 - time0 > time_) {
        // ...
        *terminated_ = true;
        // ...
    }
    // ...
}
// ...
if (eSumAlpha_) *eSumAlpha_ += dynProgProb.getProb(value) * static_cast<double>(value);
if (eOneMinusExpSumAlpha_) *eOneMinusExpSumAlpha_ += dynProgProb.getProb(value) *
    (1.0 - exp(lambda_ * static_cast<double>(value)));
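Within the w loop the routine repeatedly reads a probability from dynProgProb.getProb(value) and folds it into two kinds of accumulators: per-length entries pAlphaW_[w] and eOneMinusExpSumAlphaW_[w], and the running totals *eSumAlpha_ (probability-weighted value) and *eOneMinusExpSumAlpha_ (probability-weighted 1 - exp(lambda * value)). A reduced sketch of that accumulation pattern over a generic value-to-probability table, with hypothetical names and without the dynamic-programming machinery:

#include <cmath>
#include <cstdint>
#include <map>

struct LadderAccumulators {
    double eSumAlpha = 0.0;            // running sum of prob * value
    double eOneMinusExpSumAlpha = 0.0; // running sum of prob * (1 - exp(lambda * value))
};

// Sketch only: fold one distribution of values into the accumulators,
// mirroring the pointer-guarded updates in the fragment above.
void accumulateSketch(LadderAccumulators &acc, double lambda,
                      const std::map<int32_t, double> &probByValue)
{
    for (const auto &entry : probByValue) {
        const double value = static_cast<double>(entry.first);
        const double prob = entry.second;
        acc.eSumAlpha += prob * value;
        acc.eOneMinusExpSumAlpha += prob * (1.0 - std::exp(lambda * value));
    }
}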
const double FUDGE = 2.0;
// ...
double *eOneMinusExpSumAlpha_,
// ...
descendingLadderEpochRepeat(dimension_, score_, prob_,
    eSumAlpha_, eOneMinusExpSumAlpha_, isStrict_, 0.0, 0, 0, 0,
    lambda0_, mu0_, muAssoc0_, thetaMin0_, rMin0_, time_, terminated_);
for (size_t i = 0; i < dimension_; i++) {
    if (prob_[i] < 0.0 || 1.0 < prob_[i])
        return false;
for (size_t i = 1; i < dimension_; i++) {
    if (score_[i] <= score_[i - 1])
        return false;
if (!isProbDist(dimension_, prob_)) return false;
if (0.0 <= mu(dimension_, score_, prob_)) return false;
if (score_[dimension_ - 1] <= 0.0) return false;
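These three early returns spell out when a scoring system counts as logarithmic: the probabilities must form a valid distribution, the expected score mu must be negative, and the largest score (read from the last entry of the increasing score array) must be positive. A compact standalone restatement with hypothetical names; the 0.001 tolerance on the probability sum is an assumption, not the library's value:

#include <cstddef>
#include <cstdint>
#include <vector>

// Sketch only: the three conditions mirrored from the fragment above.
// Assumes score is sorted in increasing order and score/prob have equal length.
bool isLogarithmicSketch(const std::vector<int32_t> &score,
                         const std::vector<double> &prob)
{
    double sum = 0.0, mu = 0.0;
    for (std::size_t i = 0; i < prob.size(); i++) {
        if (prob[i] < 0.0 || 1.0 < prob[i]) return false; // valid probabilities
        sum += prob[i];
        mu += static_cast<double>(score[i]) * prob[i];
    }
    if (sum < 0.999 || 1.001 < sum) return false; // sums to ~1 (tolerance assumed)
    if (0.0 <= mu) return false;                  // expected score must be negative
    if (score.back() <= 0) return false;          // largest score must be positive
    return true;
}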
virtual void setValueFct(ValueFct *valueFct_)
virtual double getProb(Int4 value_) const
virtual double getProbLost() const
virtual Int4 getValueUpper() const
typedef int32_t Int4
4-byte (32-bit) signed integer
Namespace for mathematical applications.
bool relApprox(T x_, T y_, T eps_)
Real integerPower(Real x, Int n)
Int4 n_step(Int4 oldValue_, size_t state_)
double n_totalProbAssoc(double x_)
double n_meanPowerAssoc(double x_, Int4 power_=1L)
void n_setParameters(size_t dimension_, const Int4 *score_, const double *prob_, Int4 entry_=0)
Int4 n_bury(Int4 oldValue_, size_t state_)
void n_bracket(double *p_, double *q_)
double n_meanAssoc(double x_)
double muAssoc(size_t dimension_, const Int4 *score_, const double *prob_, double lambda_=0.0)
Int4 delta(size_t dimension_, const Int4 *score_)
double mu(size_t dimension_, const Int4 *score_, const double *prob_)
void descendingLadderEpochRepeat(size_t dimension_, const Int4 *score_, const double *prob_, double *eSumAlpha_=0, double *eOneMinusExpSumAlpha_=0, bool isStrict_=false, double lambda_=0.0, size_t endW_=0, double *pAlphaW_=0, double *eOneMinusExpSumAlphaW_=0, double lambda0_=0.0, double mu0_=0.0, double muAssoc0_=0.0, double thetaMin0_=0.0, double rMin0_=0.0, double time_=0.0, bool *terminated_=0)
double r(size_t dimension_, const Int4 *score_, const double *prob_, double theta_)
void flatten(size_t dimension_, const Int4 *const *scoreMatrix_, const double *const *prob_, size_t *dim_, Int4 **score_, double **p_, size_t dimension2_=0)
double lambda(size_t dimMatrix_, const Int4 *const *scoreMatrix_, const double *q_)
void descendingLadderEpoch(size_t dimension_, const Int4 *score_, const double *prob_, double *eSumAlpha_=0, double *eOneMinusExpSumAlpha_=0, bool isStrict_=false, double lambda0_=0.0, double mu0_=0.0, double muAssoc0_=0.0, double thetaMin0_=0.0, double rMin0_=0.0, double time_=0.0, bool *terminated_=0)
double rMin(size_t dimension_, const Int4 *score_, const double *prob_, double lambda_=0.0, double thetaMin_=0.0)
double thetaMinusDelta(double lambda_, size_t dimension_, const Int4 *score_)
bool isProbDist(size_t dimension_, const double *prob_)
double muPowerAssoc(size_t dimension_, const Int4 *score_, const double *prob_, double lambda_=0.0, Int4 power_=1)
bool isScoreIncreasing(size_t dimension_, const Int4 *score_)
double thetaMin(size_t dimension_, const Int4 *score_, const double *prob_, double lambda_=0.0)
bool isLogarithmic(size_t dimension_, const Int4 *score_, const double *prob_)
double bisection(double y_, double(*f_)(double, const T &), const T &param_, double p_, double q_, double tol_, double rtol_, Int4 *itmax_)