Commit 571917f4 authored by Alexis Lebis's avatar Alexis Lebis

Merge branch 'compHierarchy' into 'master'

Comp hierarchy rework for v2.0

See merge request !9
parents 91b9b23b 5a95a536
......@@ -10,6 +10,7 @@
#include <model/magnitude.h>
#include <model/tools.h>
#include <model/competency.h>
#include <model/competencyDistribution.h>
#include <model/decay.h>
#include <model/ea/cursus.h>
......@@ -63,11 +64,26 @@ int main(int argc, char* argv[]){
unsigned int MINPRE = parser.createParam((unsigned int)(0), "minPre", "minimal competency by course",'q',"Param").value();
unsigned int MAXPRE = parser.createParam((unsigned int)(3), "maxPre", "maximal competency by course",'Q',"Param").value();
unsigned int CBYTF = parser.createParam((unsigned int)(2), "cbyTF", "course by time frame to pick",'A',"Param").value();
unsigned int THRESHOLD_HLEVEL = parser.createParam((unsigned int)(30), "minMaxHLevelPrq", "Threshold under the one the HLevel max is used instead of min for prqs", 'y', "Param").value();
CursusEval::WEIGHT_ECTS = parser.createParam((double)(1.0), "wECTS", "Weight of ECTS in the fitness value", 'V', "Param").value();
CursusEval::WEIGHT_REPETION = parser.createParam((double)(1.0), "wREP", "Weight of Repetition in the fitness value", 'v', "Param").value();
CursusEval::WEIGHT_JOB = parser.createParam((double)(1.0), "wJob", "Weight of profession in the fitness value", 'w', "Param").value();
CursusEval::WEIGHT_PREREQ = parser.createParam((double)(1.0), "wPrereq", "Weight of prerequisites in the fitness value", 'W', "Param").value();
DecayEngine::IS_DECAY_DEACTIVATED = parser.createParam((int)(0), "decayDeactivated", "Wether or not the decay is deactivated", 'D', "Param").value();
ConstraintsProfession::DISCRETE_METRIC = parser.createParam((unsigned int)(1), "jobEvalDiscrete" , "What type of metric to use between discret and continue with mag", 'k', "Param").value();
ConstraintsPrerequisites::DISCRETE_METRIC = parser.createParam((unsigned int)(1), "prqEvalDiscrete" , "What type of metric to use between discret and continue with mag constraints", 'K', "Param").value();
ConstraintsPrerequisites::INTEGRITY_CHECK = parser.createParam((unsigned int)(1), "prqCheckFunc" , "Dev. option: switching between prereqCheck func 1 and 2", 'O', "Param").value();
ConstraintsPrerequisites::OVERFLOW_PERCENT= parser.createParam((int)(0), "overflow", "Overflow above 1 authorized during prerequisite calculation can be negative too", 'Y', "Param").value();
CompetencyDistribution::HLEVEL[0] = parser.createParam((int)(50), "hLevel0", "Comp Percentage in the HLevel 0", '0', "Param").value();
CompetencyDistribution::HLEVEL[1] = parser.createParam((int)(50), "hLevel1", "Comp Percentage in the HLevel 1", '1', "Param").value();
CompetencyDistribution::HLEVEL[2] = parser.createParam((int)(-1), "hLevel2", "Comp Percentage in the HLevel 2", '2', "Param").value();
CompetencyDistribution::HLEVEL[3] = parser.createParam((int)(-1), "hLevel3", "Comp Percentage in the HLevel 3", '3', "Param").value();
CompetencyDistribution::HLEVEL[4] = parser.createParam((int)(-1), "hLevel4", "Comp Percentage in the HLevel 4", '4', "Param").value();
CompetencyDistribution::HLEVEL[5] = parser.createParam((int)(-1), "hLevel5", "Comp Percentage in the HLevel 5", '5', "Param").value();
CompetencyDistribution::HLEVEL[6] = parser.createParam((int)(-1), "hLevel6", "Comp Percentage in the HLevel 6", '6', "Param").value();
CompetencyDistribution::HLEVEL[7] = parser.createParam((int)(-1), "hLevel7", "Comp Percentage in the HLevel 7", '7', "Param").value();
CompetencyDistribution::HLEVEL[8] = parser.createParam((int)(-1), "hLevel8", "Comp Percentage in the HLevel 8", '8', "Param").value();
CompetencyDistribution::HLEVEL[9] = parser.createParam((int)(-1), "hLevel9", "Comp Percentage in the HLevel 9", '9', "Param").value();
//PROFESSION PARAMETERS
unsigned int JOB_SEED = parser.createParam((unsigned int)(7777), "jobSeed", "Seed used for the Profession", 'g', "Param").value();
......@@ -75,7 +91,8 @@ int main(int argc, char* argv[]){
unsigned int JOB_MAXPRE = parser.createParam((unsigned int)(4), "jobMaxPre" , "maximal competency prerequisite by a job", 'J', "Param").value();
double JOB_MINMAG = parser.createParam((double)(0.5), "jobMinMag" , "miminal magnitude for a job" , 'h', "Param").value();
double JOB_MAXMAG = parser.createParam((double)(0.95), "jobMaxMag" , "maxima magnitude for a job" , 'H', "Param").value();
Profession::JOB_SELECTION_TYPE = parser.createParam((unsigned int)(0), "jobSelectType" , "Which type to use to select job", 'z', "Param").value();
//EVOLUTION ENGINE PARAMETERS
unsigned int POPSIZE = parser.createParam((unsigned int)(100), "popSize", "Population size", 'P', "Evolution Engine").value();
double PMUT = parser.createParam((double)(0.5), "pMut", "mutation rate", 'x', "Evolution Engine").value();
......@@ -85,6 +102,7 @@ int main(int argc, char* argv[]){
RATIO_RANDOM_VS_BEST = parser.createParam((unsigned int)(75), "ratioBest", "Ratio between full random and best while prereq check fails in mutation",'B',"Param").value();
// ===== PB CONFIG ZONE =====
CompetencyDistribution::sanitizeHLEVEL(); //Mandatory for HLevel
CSDVP pb;
Profession job;
std::cout << "nb cours: ---> " << NBCOURSES << std::endl;
......@@ -103,6 +121,7 @@ int main(int argc, char* argv[]){
pb.set_cfg_minimalPrerequisiteByCourse(MINPRE);
pb.set_cfg_maximalPrerequisiteByCourse(MAXPRE);
pb.set_cfg_pickedCoursesByTimeFrame(CBYTF);
pb.set_cfg_thresholdHLevelMaxOverMin(THRESHOLD_HLEVEL);
CSDVP::generateProblem(pb, CSDVP::GenerationType::RANDOM, SEED);
assert(pb.checkConfig());
......@@ -167,7 +186,7 @@ int main(int argc, char* argv[]){
//POPULATION INITIALISATION
eoPop<Cursus> pop;
Cursus c1;
for(int i = 0; i < POPSIZE; i++){
for(unsigned int i = 0; i < POPSIZE; i++){
init(c1);
eval(c1);
pop.push_back(c1);
......@@ -310,7 +329,17 @@ int main(int argc, char* argv[]){
if(localDisplay)
{
pb.displayDistribution();
std::cout << pb << std::endl;
std::cout << job << std::endl;
std::vector<Competency> compHL = CompetencyDistribution::upToHLevel(pb,2);
std::cout << "HL GTTING" << std::endl;
for(unsigned int i = 0 ; i < compHL.size(); i++)
std::cout << compHL[i] << std::endl;
std::cout << "===== CURRENT POP =====" << std::endl;
// pop.printOn(std::cout);
pop.best_element().printOn(std::cout);
std::cout << " fitness:" << pop.best_element().fitness() << std::endl;
std::cout << "Stats & metrics: \n" << std::endl;
......@@ -360,7 +389,7 @@ int main(int argc, char* argv[]){
outputfile2 << pop.size() << std::endl;
outputfile3 << pop.size() << std::endl;
for(int i=0; i<pop.size();i++){
for(unsigned int i=0; i<pop.size();i++){
//Write pop + prerequires values
pop[i].printOn(outputfile2);
outputfile2 << " " << ctrECTS.integrityCheck(pop[i]).second << " " << ctrRep.integrityCheck(pop[i]).second << " " << ctrJob.integrityCheck(pop[i]).second << " " << ctrPrq.integrityCheck(pop[i]).second << std::endl;
......@@ -381,7 +410,7 @@ int main(int argc, char* argv[]){
outputfile2 << pop.size() << std::endl;
outputfile3 << pop.size() << std::endl;
for(int i=0; i<pop.size();i++){
for(unsigned int i=0; i<pop.size();i++){
//Write pop + prerequires values
pop[i].printOn(outputfile2);
outputfile2 << " " << ctrECTS.integrityCheck(pop[i]).second << " " << ctrRep.integrityCheck(pop[i]).second << " " << ctrJob.integrityCheck(pop[i]).second << " " << ctrPrq.integrityCheck(pop[i]).second << std::endl;
......@@ -439,6 +468,10 @@ int main(int argc, char* argv[]){
std::cout << " | value: " << resPrq.second << std::endl;
std::cout << "===============" << std::endl;
ctrPrq._displayDecayedArrays(pop.best_element());
std::cout << job ;
}
// ================================= END RUN ZONE ===============================
......
......@@ -10,6 +10,7 @@ SET (EXERCICE_SOURCES
profession.cpp
problem.cpp
decay.cpp
competencyDistribution.cpp
)
ADD_LIBRARY(lModel STATIC ${EXERCICE_SOURCES})
......
......@@ -145,7 +145,7 @@ void Competency::saveDecay()
// === OPERATOR
std::ostream& operator<<(std::ostream& Stream, const Competency & c)
{
std::string s = "Competency\n\tid:"+std::to_string(c.id())+"\n\tname:"+c.c_name()+"\n\tvalue:"+std::to_string(c.competencyValue());
std::string s = "Competency\n\tid:"+std::to_string(c.id())+"\n\tname:"+c.c_name()+"\n\tvalue:"+std::to_string(c.competencyValue())+"\n\tHLevel:"+std::to_string(c.hLevel());
Stream << s ;
return Stream;
}
......
......@@ -23,7 +23,7 @@ class Competency
std::string _name;
int _howLongDecaying; //Decay related
bool _isDecaying; //Decay related
int _hLevel; // Hierarchy level (HL) of the competency. HL should onlye have lower HL comp as prereq. thus HL 0 has no prereq
//Constructor
Competency(int, Magnitude, std::string);
......@@ -64,12 +64,15 @@ class Competency
///Retrieves the name of the competency
const std::string c_name() const {return this->_name;}
std::string & name() {return this->_name;}
int hLevel() const{return this->_hLevel;}
int id() const {return this->_id;}
// === SETTER
void setMagnitude(Magnitude & m){this->_m = m;}
void setName(std::string s){this->_name = s;}
void setHL(int hl){this->_hLevel = hl;}
// === DECAY
bool isDecaying() const {return this->_isDecaying;}
......
#include <iostream>
#include <assert.h>
#include <algorithm>
#include "competencyDistribution.h"
std::vector<int> CompetencyDistribution::HLEVEL(10);
bool CompetencyDistribution::hlevelSanitized = false;
// Distributes hierarchy levels (HL) over the problem's competency catalogue
// according to the percentages stored in HLEVEL: for each HL i, roughly
// HLEVEL[i] percent of the competencies receive level i. Competencies are
// shuffled first so the assignment does not depend on catalogue order;
// rounding leftovers are then spread round-robin over the HLs.
void CompetencyDistribution::distribute(CSDVP &pb)
{
if(!hlevelSanitized)
{
// Sanitization (sum == 100, negative slots dropped) is mandatory before
// distributing; do it on the fly if the caller forgot.
std::cout << "WARNING! HLevel has not been progammatically sanitized; automatically doing it" << std::endl;
CompetencyDistribution::sanitizeHLEVEL();
}
// Shuffle the competency indices to randomize which competency gets which HL.
std::vector<int> idxComp;
for(int i = 0 ; i < pb.cfg_quantityCompetencies(); i++)
{
idxComp.push_back(i);
}
std::random_shuffle(idxComp.begin(), idxComp.end());
// For each HL i, assign level i to (HLEVEL[i] * quantity / 100) competencies,
// consuming the shuffled indices in order via nbAffected.
int nbAffected=0;
for(unsigned int i = 0; i < CompetencyDistribution::HLEVEL.size(); i++)
{
for(unsigned int j = 0; j < idxComp.size() && (j < (CompetencyDistribution::HLEVEL.at(i) * pb.cfg_quantityCompetencies()) / 100) ; j++)
{
pb.unlocked_competenciesCatalogue().at(idxComp.at(nbAffected)).setHL(i);
nbAffected++;
}
}
// Integer division above can leave competencies without a level: spread the
// remainder round-robin. NOTE(review): this indexes the catalogue directly
// (nbAffected+i), not through the shuffled idxComp — confirm this is intended.
int notAffected = pb.cfg_quantityCompetencies() - nbAffected;
if(notAffected <= 0 )
return;
int hl = 0;
for(int i = 0; i < notAffected; i++)
{
assert(nbAffected+i <= pb.cfg_quantityCompetencies());
pb.unlocked_competenciesCatalogue().at(nbAffected+i).setHL(hl%CompetencyDistribution::HLEVEL.size());
hl++;
}
}
// @deprecated Linearly spreads hierarchy levels over the competency catalogue
// (blocks of nbCompByHL consecutive competencies per level). The function
// asserts(false) unconditionally: it is kept for reference only and must not
// be called — everything after the assert is unreachable in a debug build.
void CompetencyDistribution::linearDistribution(CSDVP &pb)
{
std::cout << "Linear distribution of competency is deprecated" << std::endl;
assert(false);
int interval = CompetencyDistribution::HLevelRange(pb);
if(pb.cfg_competencyByCourseMin() != 0) //if the min borne is not 0, we add another interval for comp with no prereq
interval++;
int nbCompByHL = pb.cfg_quantityCompetencies() / interval;
std::cout << "Interval" << interval << " -- nbCompByHL:" << nbCompByHL << std::endl;
// Assign HL = i / nbCompByHL: contiguous blocks of competencies share a level.
for(int i = 0; i < pb.cfg_quantityCompetencies() && i < nbCompByHL * interval; i++)
{
pb.unlocked_competenciesCatalogue().at(i).setHL(i/nbCompByHL);
}
//Taking into account comp missed with the division
//int diff = pb.cfg_quantityCompetencies() - nbCompByHL*interval;
int idxInterval = 0;
for(int i = pb.cfg_quantityCompetencies()-1; i >= nbCompByHL*interval; i--)
{
pb.unlocked_competenciesCatalogue().at( i ).setHL(idxInterval%interval);
idxInterval++;
}
}
// === STATIC
/**
 * Returns the number of hierarchy levels, i.e. valid HLs are [0; HLevelRange()-1].
 * Requires sanitizeHLEVEL() to have been called first (HLEVEL compacted).
 */
int CompetencyDistribution::HLevelRange(CSDVP & pb)
{
    // Bug fix: the original line asserted on the *address* of the
    // sanitizeHLEVEL function (`assert(CompetencyDistribution::sanitizeHLEVEL)`),
    // which is always non-null, so the check could never fire. The intent was
    // clearly to verify the sanitization flag.
    assert(CompetencyDistribution::hlevelSanitized);
    // int interval = pb.cfg_competencyByCourseMax() - pb.cfg_competencyByCourseMin();
    return CompetencyDistribution::HLEVEL.size();
}
/**
 * Returns every competency of the problem whose hierarchy level equals level.
 * @param pb    the problem whose competency catalogue is scanned
 * @param level the hierarchy level to select; out-of-range values yield an
 *              empty vector
 */
std::vector<Competency> CompetencyDistribution::getHLevel(CSDVP & pb, int level)
{
    std::vector<Competency> comp = std::vector<Competency>();
    if(level < 0)
        return comp;
    // Valid levels are [0; HLevelRange-1]; the original guard used '>' which
    // let level == HLevelRange fall through (harmless — the scan below found
    // nothing — but the tighter bound short-circuits and documents the range).
    if(level >= CompetencyDistribution::HLevelRange(pb))
        return comp;

    std::vector<Competency> pbComp = pb.competencyCatalogue();
    for(unsigned int i = 0; i < pbComp.size(); i++)
        if(pbComp[i].hLevel() == level)
            comp.push_back(pbComp[i]);

    return comp;
}
/**
 * Retrieves every competency whose hierarchy level lies in [0; level]
 * (both bounds inclusive).
 */
std::vector<Competency> CompetencyDistribution::upToHLevel(CSDVP & pb, int level)
{
    std::vector<Competency> collected;
    for(int lvl = 0; lvl <= level; lvl++)
    {
        std::vector<Competency> atLevel = CompetencyDistribution::getHLevel(pb, lvl);
        collected.insert(collected.end(), atLevel.begin(), atLevel.end());
    }
    return collected;
}
/**
 * Retrieves every still-unassigned competency whose hierarchy level lies in
 * [0; level] (both bounds inclusive).
 */
std::vector<Competency> CompetencyDistribution::unassignedUpToHLevel(CSDVP & pb, int level)
{
    std::vector<Competency> collected;
    //assert(level < HLEVEL.size());
    for(int lvl = 0; lvl <= level; lvl++)
    {
        std::vector<Competency> atLevel = CompetencyDistribution::unassignedAtHLevel(pb, lvl);
        collected.insert(collected.end(), atLevel.begin(), atLevel.end());
    }
    return collected;
}
/**
 * Returns the competencies sitting at exactly hlevel that have not been
 * distributed yet (their slot in unlocked_distributedCompetencies is -1).
 */
std::vector<Competency> CompetencyDistribution::unassignedAtHLevel(CSDVP &pb, int hlevel)
{
    assert(hlevel >= 0);
    std::vector<Competency> matches;
    const unsigned int nbComp = pb.competencyCatalogue().size();
    for(unsigned int idx = 0; idx < nbComp; idx++)
    {
        // Level check first (short-circuit) before probing the distribution slot.
        if(pb.competencyCatalogue().at(idx).hLevel() == hlevel
           && pb.unlocked_distributedCompetencies().at(idx) == -1)
        {
            matches.push_back(pb.competencyCatalogue().at(idx));
        }
    }
    return matches;
}
/**
 * Sanitizes HLEVEL: keeps only the non-negative percentages, verifies that
 * they sum to exactly 100, shrinks HLEVEL down to the kept values, and raises
 * the hlevelSanitized flag so distribute() knows the array is usable.
 * Aborts (assert) when the configured percentages do not total 100.
 */
void CompetencyDistribution::sanitizeHLEVEL()
{
    std::vector<int> kept;
    int total = 0;
    for(unsigned int idx = 0; idx < CompetencyDistribution::HLEVEL.size(); idx++)
    {
        int percent = CompetencyDistribution::HLEVEL.at(idx);
        if(percent < 0) // negative values mark unused slots — drop them
            continue;
        total += percent;
        kept.push_back(percent);
    }
    assert(total==100); // the kept percentages must cover exactly the whole catalogue
    CompetencyDistribution::HLEVEL = kept;
    CompetencyDistribution::hlevelSanitized = true;
}
/**
 * Prints the HLEVEL percentage array on stdout, e.g. "HLEVEL:\n[50|50]".
 */
void CompetencyDistribution::displayHLevel()
{
    std::cout << "HLEVEL:" << std::endl << "[";
    // Bug fix: with an empty HLEVEL, `size()-1` underflows (unsigned) so the
    // loop would run wild and the trailing .at() would throw — print an empty
    // array instead. Output for a non-empty HLEVEL is unchanged.
    if(CompetencyDistribution::HLEVEL.empty())
    {
        std::cout << "]" << std::endl << std::endl;
        return;
    }
    for(unsigned int i = 0 ; i < CompetencyDistribution::HLEVEL.size()-1; i++)
    {
        std::cout << CompetencyDistribution::HLEVEL.at(i) << "|";
    }
    std::cout << CompetencyDistribution::HLEVEL.at(CompetencyDistribution::HLEVEL.size()-1) << "]" << std::endl << std::endl;
}
\ No newline at end of file
#ifndef SRC_COMPETENCY_DISTRIBUTION_H_
#define SRC_COMPETENCY_DISTRIBUTION_H_
#include <vector>
#include "problem.h"
#include "competency.h"
/**
 * Represents the distribution of prerequisites from competencies.
 *
 * This is used to implement a hierarchy between competencies.
 * CompetencyDistribution must be called after the competencies have been created within the problem,
 * and preferably before the competencies have been assigned to the courses.
 *
 * @author alexis.lebis
 * @version 1
 */
class CompetencyDistribution
{
    private:
        // True once sanitizeHLEVEL() has validated and compacted HLEVEL.
        static bool hlevelSanitized;
    public:
        // HLEVEL[i] = percentage of the competency catalogue assigned to
        // hierarchy level i. Filled externally (e.g. from CLI parameters);
        // negative entries mean "unused slot" and are dropped by sanitizeHLEVEL().
        static std::vector<int> HLEVEL;

        // @deprecated block-linear assignment of HLs; asserts in the current implementation.
        void linearDistribution(CSDVP &);
        /**
         * distribute realises the competency distribution according to the HLEVEL
         * percentages (random assignment over a shuffled catalogue).
         */
        void distribute(CSDVP &);

    // === STATIC
        static int HLevelRange(CSDVP &); //return the range max (starting from 0) of the HL
        static std::vector<Competency> getHLevel(CSDVP &, int); // returns all the competency of a given HL
        static std::vector<Competency> upToHLevel(CSDVP &, int); // retrieves all the comp comprised between [0;int] (and NOT [0;int[)
        static std::vector<Competency> unassignedAtHLevel(CSDVP &, int hlevel); // unassigned comps sitting at exactly this HL
        static std::vector<Competency> unassignedUpToHLevel(CSDVP &, int); // unassigned comps in [0;int] inclusive

        /*
         * sanitizeHLEVEL performs two tasks:
         *  * it verifies that the sum of all HLEVEL is equal to 100
         *  * it resizes the HLEVEL array, eliminating all unassigned values
         */
        static void sanitizeHLEVEL();
        static void displayHLevel(); // prints HLEVEL on stdout as "[p0|p1|...]"
};
#endif // SRC_COMPETENCY_DISTRIBUTION_H_
\ No newline at end of file
......@@ -8,7 +8,7 @@ std::pair<bool, double> ConstraintsECTS::integrityCheck(Cursus indiv)
int tmpECTS = 0;
//std::cout << "courses size : " << std::to_string(courses.size()) << std::endl;
//std::cout << "courses catl : " << std::to_string(this->_pb.cfg_quantityCourses()) << std::endl;
for(int i = 0; i < indiv.size(); i++)
for(unsigned int i = 0; i < indiv.size(); i++)
{
tmpECTS += courses.at(indiv[i]).ects();
//std::cout << std::to_string(courses.at(indiv[i]).ects()) << " + ";
......
#include "prerequisitesConstraints.h"
#include <iomanip>
#include <string>
#include <utility>
#include <tuple>
#include "model/course.h"
#include "model/competency.h"
......@@ -9,26 +11,36 @@
#include "model/exception/competencyEvolvingException.h"
std::pair<bool, double> ConstraintsPrerequisites::integrityCheck(Cursus indiv)
int ConstraintsPrerequisites::DISCRETE_METRIC = 1;
int ConstraintsPrerequisites::INTEGRITY_CHECK = 1;
int ConstraintsPrerequisites::OVERFLOW_PERCENT= 0;
// @deprecated
std::pair<bool, double> ConstraintsPrerequisites::old_integrityCheck(Cursus indiv)
{
int currentTF = 0;
int notFound = 0;
int notRespected = 0;
int score = 0;
//int score = 0;
int nbPrereq = 0;
int magDivisor = 0;
double magDiff = 0;
//Each comp availble at a specific TF. Decay can be applied between i and i+1
std::vector<std::vector<Competency>> compByTF(this->_pb.timeFrames().size());
Course currentCourse;
Competency currentCompetency;
std::pair<int, int> prereqFound; prereqFound.first = 0; prereqFound.second = 0;
std::tuple<int, int, double, int> prereqFound;
std::get<0>(prereqFound) = 0; std::get<1>(prereqFound) = 0;
std::get<2>(prereqFound) = 0; std::get<3>(prereqFound) = 0;
std::pair<int, Competency> alreadyExists;
bool changedTF = false;
//std::cout << "=========START========" << std::endl;
for(int i = 0; i < indiv.size() ; i++)
for(unsigned int i = 0; i < indiv.size() ; i++)
{
if(currentTF != i / this->_pb.cfg_pickedCoursesByTimeFrame())
{
......@@ -42,7 +54,7 @@ std::pair<bool, double> ConstraintsPrerequisites::integrityCheck(Cursus indiv)
//std::cout << "Current TF: " << std::to_string(currentTF) << std::endl;
//If changedTF is set to true, then we have changed of TF, we need to make available all the Comp in TF-1 here in TF, in addition to the new that'll be discovered in TF
for(int j = 0 ; changedTF && j < compByTF.at(currentTF-1).size(); j++)
for(unsigned int j = 0 ; changedTF && j < compByTF.at(currentTF-1).size(); j++)
{
// HERE, VARIABLE DECAY CAN BE APPLIED!!!!!!!!
compByTF.at(currentTF).push_back(compByTF.at(currentTF-1).at(j));
......@@ -67,12 +79,14 @@ std::pair<bool, double> ConstraintsPrerequisites::integrityCheck(Cursus indiv)
{
prereqFound = this->_prereqsInPreviousTF(std::vector<Competency>(0), currentCourse.prerequisites());
}
notFound += prereqFound.first;
notRespected += prereqFound.second;
notFound += std::get<0>(prereqFound);
notRespected += std::get<1>(prereqFound);
magDiff += std::get<2>(prereqFound);
magDivisor += std::get<3>(prereqFound);
// Handling teached comp
for(int j = 0; j < currentCourse.teachedCompetenciesWeighted().size() ; j++)
for(unsigned int j = 0; j < currentCourse.teachedCompetenciesWeighted().size() ; j++)
{
currentCompetency = currentCourse.teachedCompetenciesWeighted().at(j).first;
......@@ -120,52 +134,510 @@ std::pair<bool, double> ConstraintsPrerequisites::integrityCheck(Cursus indiv)
std::cout << "Not Respected: " << std::to_string(notRespected) << std::endl;
std::cout << "Nb Prereq: " << std::to_string(nbPrereq) << std::endl;
*/
double metric = 0;
if(nbPrereq > 0)
switch (ConstraintsPrerequisites::DISCRETE_METRIC)
{
metric = 1.0 - ( (((double)2 * (double)notFound) + (double)notRespected ) / (2 * (double) nbPrereq) );
}
else //can't divide by 0
{
if(isOK)
metric = 1;
else
metric = 0;
case 0/* constant-expression */:
// std::cout << "MagDiff: " << magDiff << " for " << (double)magDivisor << " prereqs. (1 - " << ( magDiff / (double)magDivisor ) << std::endl;
assert(magDivisor != 0);
return std::pair<bool, double>(isOK, 1 - ( magDiff / (double)magDivisor ) );
default:
double metric = 0;
if(nbPrereq > 0)
{
metric = 1.0 - ( (((double)2 * (double)notFound) + (double)notRespected ) / (2 * (double) nbPrereq) );
}
else //can't divide by 0
{
if(isOK)
metric = 1;
else
metric = 0;
}
//std::cout << "Metric: " << std::to_string(metric) << std::endl;
//std::cout << "====================" << std::endl;
return std::pair<bool, double>(isOK, metric);
}
//std::cout << "Metric: " << std::to_string(metric) << std::endl;
//std::cout << "====================" << std::endl;
return std::pair<bool, double>(isOK, metric);
}
std::pair<int, int> ConstraintsPrerequisites::_prereqsInPreviousTF(std::vector<Competency> cInTF, std::vector<Competency> prereqs)
std::tuple<int, int, double, int> ConstraintsPrerequisites::_prereqsInPreviousTF(std::vector<Competency> cInTF, std::vector<Competency> prereqs)
{
int notFound = 0;
int notRespected = 0;
int nbFound = 0;
bool found = false;
double magDiff = 0;
int divisor = 0;
if(cInTF.size() == 0) //if empty, we'll find nothing
return std::pair<int, int>(prereqs.size(), 0);
return std::tuple<int, int, double, int>(prereqs.size(), 0, magDiff, divisor);
for(int i = 0; i < prereqs.size(); i++)
for(unsigned int i = 0; i < prereqs.size(); i++)
{
found = false;
//std::cout << "Looking for " << prereqs.at(i) << std::endl;
for(int j = 0 ; j < cInTF.size() && !found; j++)
for(unsigned int j = 0 ; j < cInTF.size() && !found; j++)
{
//std::cout << "\n\t" << cInTF.at(j) << std::endl;
if(prereqs.at(i)==cInTF.at(j))
{
nbFound++;
found = true;
if(prereqs.at(i).c_magnitude().value() > cInTF.at(j).decay())
notRespected++;
{
notRespected++;
magDiff += ( prereqs.at(i).c_magnitude().value() - cInTF.at(j).decay() ) / prereqs.at(i).c_magnitude().value();
// std::cout << "\tMag diff: " << prereqs.at(i).c_magnitude().value() - cInTF.at(j).decay() << "\t Ratio:" << magDiff << std::endl;
}
}
}
if(!found)
notFound++;
}
divisor = (notFound + nbFound);
//std::cout << "NF: " << std::to_string(notFound) << " | NR: " << std::to_string(notRespected) << std::endl;
return std::pair<int, int>(notFound, notRespected);
return std::tuple<int, int, double, int>(notFound, notRespected, magDiff, divisor);
}
// @todo repercuter les affections du decay sur tt les n+1
/**
 * Checks how well an individual (an ordered pick of courses spread over time
 * frames) satisfies course prerequisites, taking competency decay into account.
 *
 * Pipeline: (1) build, per time frame, the magnitude acquired for each
 * competency from the courses picked in that TF; (2) accumulate across TFs,
 * subtracting a decay penalty for every TF a competency stagnates; (3) check
 * each course's prerequisites against the accumulated values of the previous TF.
 *
 * @param indiv the cursus to evaluate (indices into the course catalogue)
 * @return {allPrereqsRespected, metric in [0;1]} — the metric formula depends
 *         on DISCRETE_METRIC: 0 selects a magnitude-difference metric, any
 *         other value the discrete notFound/notRespected counting metric.
 *
 * NOTE(review): large commented-out regions are earlier iterations of the
 * decay propagation kept for reference; only the active lines matter.
 */
std::pair<bool, double> ConstraintsPrerequisites::integrityCheck(Cursus indiv)
{
assert(ConstraintsPrerequisites::OVERFLOW_PERCENT >= -100 && ConstraintsPrerequisites::OVERFLOW_PERCENT <= 100);
std::pair<bool, double> res;
std::vector<std::vector<double>> compDistribyTF; //each i contains an vector representing the magnitique of each comp existing, according to the path followed. [0;1]
std::vector<std::vector<double>> compMagbyTF;
// === init
// One slot per competency, per time frame, all starting at 0.
for(unsigned int i = 0; i < this->_pb.timeFrames().size(); i++)
{
std::vector<double> tmp;
for(unsigned int j = 0; j < this->_pb.competencyCatalogue().size(); j++)
{
tmp.push_back(0);
}
compDistribyTF.push_back(tmp);
compMagbyTF.push_back(tmp);
}
// === fill each double vector of each tf according to comp seen
int currentTF = 0;
Course currentCourse;
std::vector<std::pair<Competency, double>> teachedComps;
int idx;
for(unsigned int i = 0; i < indiv.size(); i++)
{
// Position i in the individual maps to a TF by integer division.
currentTF = i / this->_pb.cfg_pickedCoursesByTimeFrame();
currentCourse = this->_pb.coursesCatalogue().at(indiv.at(i));
teachedComps = currentCourse.teachedCompetenciesWeighted();
for(unsigned int j = 0; j < teachedComps.size(); j++)
{
idx = this->_pb.mapCompToPosition(teachedComps.at(j).first);
assert(idx >= 0);
compDistribyTF.at(currentTF).at(idx) += teachedComps.at(j).first.magnitude().value();
if(compDistribyTF.at(currentTF).at(idx) > 1) //can't exceed 1 in a signle TF
compDistribyTF.at(currentTF).at(idx) = 1;
compMagbyTF.at(currentTF).at(idx) = compDistribyTF.at(currentTF).at(idx);
}
}
// === sum for t of all t-i
// for(unsigned int i = 1; i < this->_pb.timeFrames().size() ; i++)
// {
// for(unsigned int k = 0; k < this->_pb.competencyCatalogue().size() ; k++)
// {
// compDistribyTF.at(i).at(k) += compDistribyTF.at(i-1).at(k);
// if(compDistribyTF.at(i).at(k) > 1) //commented because decay ! warning it will make subsequent delta equal !! need to create a new tmp array
// compDistribyTF.at(i).at(k) = 1;
// }
// }
// === apply decay
// decayClock[j] counts how many consecutive TFs competency j stagnated;
// it resets whenever the competency is mobilized again.
std::vector<double> tmpDiff;
std::vector<int> decayClock(this->_pb.competencyCatalogue().size());
double decayVal = 0; double delta = 0; double decayed = 0; double current = 0;
int decaynb = 0;
for(unsigned int i = 1; i < compDistribyTF.size(); i++) //starts to 1 because 0 does not have decay
{
for(unsigned int j = 0; j < compDistribyTF.at(0).size(); j++) //cDTF[0] because we do not care which, they all have the same size == this->_pb.competencyCatalogue().size()
{
decayVal = 0; delta = 0; decayed = 0;
// storing the delta of the mag in i-1 ; i
delta = compDistribyTF.at(i).at(j) - compDistribyTF.at(i-1).at(j);
current = compMagbyTF.at(i).at(j);
if(current == 0) //if 0 -->comp not mobilized here, thus stagnation therefore decay for the jth comp
{
decayClock.at(j)++;
// decayed = compDistribyTF.at(i-1).at(j) - decayVal;
// if(decayed < 0)
// decayed = 0;
// compDistribyTF.at(i).at(j) = delta + decayed;
// // affecting the decay to this comp in upper TFs
// if(decayed > 0)
// {
// for(unsigned int k = i+1 ; k < compDistribyTF.size(); k++)
// {
// compDistribyTF.at(k).at(j) -= decayVal;
// if(compDistribyTF.at(k).at(j) < 0)
// compDistribyTF.at(k).at(j) = 0;
// }
// }
}
else
{
// if(decayClock.at(j)>0) //if there is decay
// {
// decayVal = DecayEngine::defaultDecay(decayClock.at(j));
// decaynb++;
// }
// //delta = compDistribyTF.at(i).at(j) - compDistribyTF.at(i-1).at(j);
// decayed = compDistribyTF.at(i-1).at(j) - decayVal;
// if(decayed < 0)
// decayed = 0;
// compDistribyTF.at(i).at(j) = delta + decayed;
// // affecting the decay to this comp in upper TFs
// if(decayed > 0)
// {
// for(int k = i+1 ; k < compDistribyTF.size(); k++)
// {
// compDistribyTF.at(k).at(j) -= decayVal;
// if(compDistribyTF.at(k).at(j) < 0)
// compDistribyTF.at(k).at(j) = 0;
// }
// }
decayClock.at(j) = 0; //we just reset the decay counter since we found the comp at j
}
if(decayClock.at(j)>0) //if there is decay
{
//we take the diff between decay(j) and decay(j-1) since for each stagnation we instantly repercut the decay
//decayVal = DecayEngine::defaultDecay(decayClock.at(j)) - DecayEngine::defaultDecay(decayClock.at(j)-1);
decayVal = DecayEngine::defaultDecay(decayClock.at(j));
decaynb++;
}
// if(compMagbyTF.at(i-1).at(j) == 0) //if previous value was 0, no need to apply decay in fact
// decayVal = 0;
// Accumulate: what this TF teaches + what was accumulated before, minus
// decay; clamped to [0; 1 + OVERFLOW_PERCENT/100].
compDistribyTF.at(i).at(j) = compMagbyTF.at(i).at(j) + compDistribyTF.at(i-1).at(j) - decayVal;
if(compDistribyTF.at(i).at(j) < 0)
compDistribyTF.at(i).at(j) = 0;
else if(compDistribyTF.at(i).at(j) > 1 + (double)ConstraintsPrerequisites::OVERFLOW_PERCENT/100)
compDistribyTF.at(i).at(j) = 1 + (double)ConstraintsPrerequisites::OVERFLOW_PERCENT/100;
// if(compDistribyTF.at(i-1).at(j) != 0) //if previous value was 0, no need to apply decay in fact
// {
// compDistribyTF.at(i).at(j) -= decayVal;
// if(compDistribyTF.at(i).at(j) <= 0)
// compDistribyTF.at(i).at(j) = 0;
// //repercuting the decay to in the upper TF
// for(unsigned int k = i + 1; k < compDistribyTF.size(); k++)
// {
// compDistribyTF.at(k).at(j) -= decayVal;
// if(compDistribyTF.at(k).at(j) < 0)
// compDistribyTF.at(k).at(j) = 0;
// }
// }
}
}
// === checking courses prerequisite
// A prerequisite is checked against the accumulated value of the *previous*
// TF (a competency taught in the same TF does not count).
int notFound = 0; int notRespected = 0; int nbPrereq = 0;
double magDiff = 0; // int magDivisor = 0;
for(unsigned int i = 0; i < indiv.size(); i++)
{
currentTF = i / this->_pb.cfg_pickedCoursesByTimeFrame();
assert(indiv.at(i) < this->_pb.coursesCatalogue().size());
currentCourse = this->_pb.coursesCatalogue().at(indiv.at(i));
for(unsigned int j = 0; j < currentCourse.prerequisites().size(); j++)
{
nbPrereq++;
idx = this->_pb.mapCompToPosition(currentCourse.prerequisites().at(j));
if( currentTF == 0 || //if we have prereq at tf 0 -> automatically fails !
compDistribyTF.at(currentTF-1).at(idx) < currentCourse.prerequisites().at(j).c_magnitude().value())
{
// PREREQ KO !
// notFound: the competency is entirely absent (or we are in TF 0);
// notRespected: present but below the required magnitude.
if(currentTF == 0 || compDistribyTF.at(currentTF-1).at(idx) == 0)
{
magDiff += currentCourse.prerequisites().at(j).c_magnitude().value(); //not exists so full value in diff here
notFound++;
}
else
{
magDiff += ( currentCourse.prerequisites().at(j).c_magnitude().value() - compDistribyTF.at(currentTF-1).at(idx) ) / currentCourse.prerequisites().at(j).c_magnitude().value(); // percentage of diff in [0;1]
notRespected++;
}
}
else
{
// PREREQ OK !
}
}
}
bool isOK = ((notFound == 0) && (notRespected == 0));
switch (ConstraintsPrerequisites::DISCRETE_METRIC)
{
case 0/* constant-expression */:
// Magnitude-based metric: 1 minus the average relative magnitude deficit.
// NOTE(review): divides by nbPrereq — NaN if the cursus has no prerequisite
// at all and DISCRETE_METRIC == 0; confirm callers guarantee nbPrereq > 0.
// std::cout << "MagDiff: " << magDiff << " for " << (double)nbPrereq << " prereqs. (1 - " << ( magDiff / (double)nbPrereq ) << std::endl;
// assert(magDivisor != 0);
return std::pair<bool, double>(isOK, 1 - ( magDiff / (double)nbPrereq ) );
default:
// Discrete metric: missing prereqs weigh twice as much as under-leveled ones.
double metric = 0;
if(nbPrereq > 0)
{
metric = 1.0 - ( (((double)2 * (double)notFound) + (double)notRespected ) / (2 * (double) nbPrereq) );
}
else //can't divide by 0
{
if(isOK)
metric = 1;
else
metric = 0;
}
//std::cout << "Metric: " << std::to_string(metric) << std::endl;
//std::cout << "====================" << std::endl;
return std::pair<bool, double>(isOK, metric);
}
}
void ConstraintsPrerequisites::_displayDecayedArrays(Cursus indiv)
{
std::pair<bool, double> res;
std::vector<std::vector<double>> compDistribyTF; //each i contains an vector representing the magnitique of each comp existing, according to the path followed. [0;1]
std::vector<std::vector<double>> compMagbyTF;
// === init
for(unsigned int i = 0; i < this->_pb.timeFrames().size(); i++)
{
std::vector<double> tmp;
for(unsigned int j = 0; j < this->_pb.competencyCatalogue().size(); j++)
{
tmp.push_back(0);
}
compDistribyTF.push_back(tmp);
compMagbyTF.push_back(tmp);
}
// === fill each double vector of each tf according to comp seen
int currentTF = 0;
Course currentCourse;
std::vector<std::pair<Competency, double>> teachedComps;
int idx;
for(unsigned int i = 0; i < indiv.size(); i++)
{
currentTF = i / this->_pb.cfg_pickedCoursesByTimeFrame();
currentCourse = this->_pb.coursesCatalogue().at(indiv.at(i));
teachedComps = currentCourse.teachedCompetenciesWeighted();
for(unsigned int j = 0; j < teachedComps.size(); j++)
{
idx = this->_pb.mapCompToPosition(teachedComps.at(j).first);
assert(idx >= 0);
compDistribyTF.at(currentTF).at(idx) += teachedComps.at(j).first.magnitude().value();
if(compDistribyTF.at(currentTF).at(idx) > 1) //can't exceed 1 in a signle TF
compDistribyTF.at(currentTF).at(idx) = 1;
compMagbyTF.at(currentTF).at(idx) = compDistribyTF.at(currentTF).at(idx);
}
}
// === Comp by TF display section
std::cout << "\n* Competency value by TF" << std::endl;
for(unsigned int i = 0 ; i < compDistribyTF.size(); i++)
{
std::cout << "TF#" << i << ": [";
for(unsigned int j = 0; j < compDistribyTF.at(i).size()-1; j++)
{
std::cout << std::setprecision(3) << compDistribyTF.at(i).at(j) << ";\t";
}
std::cout << compDistribyTF.at(i).at(compDistribyTF.at(i).size()-1) << "]" << std::endl;
}
// === sum for t of all t-i
// for(unsigned int i = 1; i < this->_pb.timeFrames().size() ; i++)
// {
// // for(int j = 0; j < i; j++)
// // {
// for(unsigned int k = 0; k < this->_pb.competencyCatalogue().size() ; k++)
// {
// compDistribyTF.at(i).at(k) += compDistribyTF.at(i-1).at(k);
// if(compDistribyTF.at(i).at(k) > 1) //commented because decay
// compDistribyTF.at(i).at(k) = 1;
// }
// // }
// }
// === Summed display section
std::cout << "\n* Summed value by TF" << std::endl;
for(unsigned int i = 0 ; i < compDistribyTF.size(); i++)
{
std::cout << "TF#" << i << ": [";
for(unsigned int j = 0; j < compDistribyTF.at(i).size()-1; j++)
{
std::cout << std::setprecision(3) << compDistribyTF.at(i).at(j) << ";\t";
}
std::cout << compDistribyTF.at(i).at(compDistribyTF.at(i).size()-1) << "]" << std::endl;
}
// === apply decay
std::vector<double> tmpDiff;
std::vector<int> decayClock(this->_pb.competencyCatalogue().size());
double decayVal = 0; double delta = 0; double decayed = 0; double current = 0;
int decaynb = 0;
for(unsigned int i = 1; i < compDistribyTF.size(); i++) //starts to 1 because 0 does not have decay
{
for(unsigned int j = 0; j < compDistribyTF.at(0).size(); j++) //cDTF[0] because we do not care which, they all have the same size == this->_pb.competencyCatalogue().size()
{
decayVal = 0; delta = 0; decayed = 0;
// storing the delta of the mag in i-1 ; i
delta = compDistribyTF.at(i).at(j) - compDistribyTF.at(i-1).at(j);
current = compMagbyTF.at(i).at(j);
if(current == 0) //if 0 -->comp not mobilized here, thus stagnation therefore decay for the jth comp
{
decayClock.at(j)++;
// decayed = compDistribyTF.at(i-1).at(j) - decayVal;
// if(decayed < 0)
// decayed = 0;
// compDistribyTF.at(i).at(j) = delta + decayed;
// // affecting the decay to this comp in upper TFs
// if(decayed > 0)
// {
// for(unsigned int k = i+1 ; k < compDistribyTF.size(); k++)
// {
// compDistribyTF.at(k).at(j) -= decayVal;
// if(compDistribyTF.at(k).at(j) < 0)
// compDistribyTF.at(k).at(j) = 0;
// }
// }
}
else
{
// if(decayClock.at(j)>0) //if there is decay
// {
// decayVal = DecayEngine::defaultDecay(decayClock.at(j));
// decaynb++;
// }
// //delta = compDistribyTF.at(i).at(j) - compDistribyTF.at(i-1).at(j);
// decayed = compDistribyTF.at(i-1).at(j) - decayVal;
// if(decayed < 0)
// decayed = 0;
// compDistribyTF.at(i).at(j) = delta + decayed;
// // affecting the decay to this comp in upper TFs
// if(decayed > 0)
// {
// for(int k = i+1 ; k < compDistribyTF.size(); k++)
// {
// compDistribyTF.at(k).at(j) -= decayVal;
// if(compDistribyTF.at(k).at(j) < 0)
// compDistribyTF.at(k).at(j) = 0;
// }
// }
decayClock.at(j) = 0; //we just reset the decay counter since we found the comp at j
}
if(decayClock.at(j)>0) //if there is decay
{
//we take the diff between decay(j) and decay(j-1) since for each stagnation we instantly repercut the decay
//decayVal = DecayEngine::defaultDecay(decayClock.at(j)) - DecayEngine::defaultDecay(decayClock.at(j)-1);
decayVal = DecayEngine::defaultDecay(decayClock.at(j));
decaynb++;
}
// if(compMagbyTF.at(i-1).at(j) == 0) //if previous value was 0, no need to apply decay in fact
// decayVal = 0;
compDistribyTF.at(i).at(j) = compMagbyTF.at(i).at(j) + compDistribyTF.at(i-1).at(j) - decayVal;
if(compDistribyTF.at(i).at(j) < 0)
compDistribyTF.at(i).at(j) = 0;
else if(compDistribyTF.at(i).at(j) > 1 + (double)ConstraintsPrerequisites::OVERFLOW_PERCENT/100)
compDistribyTF.at(i).at(j) = 1 + (double)ConstraintsPrerequisites::OVERFLOW_PERCENT/100;
// if(compDistribyTF.at(i-1).at(j) != 0)
// {
// compDistribyTF.at(i).at(j) -= decayVal;
// if(compDistribyTF.at(i).at(j) <= 0)
// compDistribyTF.at(i).at(j) = 0;
// //repercuting the decay to in the upper TF
// for(unsigned int k = i + 1; k < compDistribyTF.size(); k++)
// {
// compDistribyTF.at(k).at(j) -= decayVal;
// if(compDistribyTF.at(k).at(j) < 0)
// compDistribyTF.at(k).at(j) = 0;
// }
// }
}
}
//=== Decay Display Section
std::cout << "\n* Decayed value by TF" << std::endl;
for(unsigned int i = 0 ; i < compDistribyTF.size(); i++)
{
std::cout << "TF#" << i << ": [";
for(unsigned int j = 0; j < compDistribyTF.at(i).size()-1; j++)
{
std::cout << std::setprecision(3) << compDistribyTF.at(i).at(j) << ";\t";
}
std::cout << compDistribyTF.at(i).at(compDistribyTF.at(i).size()-1) << "]" << std::endl;
}
}
......@@ -3,6 +3,7 @@
#include <vector>
#include <utility>
#include <tuple>
#include <eo>
......@@ -25,19 +26,37 @@ class ConstraintsPrerequisites
/**
* Checks the prerequisites prereqs in the competencies cInTF (generally, the competencies from the previous TF).
* It returns a std::pair, where the first element indicates how many prerequisites HAS NOT BEEN FOUND.
* It returns a std::tuple, where the first element indicates how many prerequisites HAS NOT BEEN FOUND.
* The second elements indicates how many prerequisites has not enought mastery (BUT EXISTS in cInTF!)
* The third element is the continous metric calculated accordingly the difference of mag between cours prereqs and comp mag
* The fourth element is to be used to divide the third one (how many elements contributed to establish the mag diff)
*/
std::pair<int,int> _prereqsInPreviousTF(std::vector<Competency> cInTF, std::vector<Competency> prereqs);
std::tuple<int,int, double, int> _prereqsInPreviousTF(std::vector<Competency> cInTF, std::vector<Competency> prereqs);
public:
static int DISCRETE_METRIC;
static int INTEGRITY_CHECK;
static int OVERFLOW_PERCENT;
ConstraintsPrerequisites(const CSDVP & csdvp, const Profession & job)
: _pb(csdvp), _job(job) {}
/** Integrity check is used to investigate wheteher or not one indiv respects the constraints represented by THIS.
* Returns a std::pair. First is a boolean set to true when the indiv passes the test and therefore is compilant with the constraint, false otherwise. Second is the associated metric, mostly usable during fitness calcul.
* @deprecated
* @todo Decay competency magnitude
*/
std::pair<bool, double> integrityCheck(Cursus indiv);
std::pair<bool, double> old_integrityCheck(Cursus indiv);
/**
* 2nd version of integrity check. Supposed to be more reliable and faster
*/
std::pair<bool, double> integrityCheck(Cursus indiv);
/**
* Displayer debug purpose. Display decayed array and non decayed array
* This is mostly a duplicata of integrityCheck without the prerequisite check and display
*/
void _displayDecayedArrays(Cursus indiv);
};
#endif // SRC_MODEL_CONSTRAINTS_PREREQUISITES_CONSTRAINTS_H_
\ No newline at end of file
......@@ -9,11 +9,13 @@
#include "model/exception/competencyEvolvingException.h"
int ConstraintsProfession::DISCRETE_METRIC = 1;
std::pair<bool, double> ConstraintsProfession::integrityCheck(Cursus indiv)
{
std::vector<Competency> compToAnswer;
for(int i = 0 ; i < this->_job.prerequisites().size(); i++)
for(unsigned int i = 0 ; i < this->_job.prerequisites().size(); i++)
{
std::string name = this->_job.prerequisites().at(i).c_name();
compToAnswer.push_back(Competency::buildTMP(0, name)); //same name to exploit the Competency::operator== on name equality
......@@ -26,7 +28,7 @@ std::pair<bool, double> ConstraintsProfession::integrityCheck(Cursus indiv)
bool changedTF = false;
int currentTF = 0;
for(int i = 0 ; i < indiv.size(); i++)
for(unsigned int i = 0 ; i < indiv.size(); i++)
{
current = this->_pb.coursesCatalogue().at(indiv.at(i));
......@@ -36,12 +38,12 @@ std::pair<bool, double> ConstraintsProfession::integrityCheck(Cursus indiv)
changedTF = false;
currentTF = i / this->_pb.cfg_pickedCoursesByTimeFrame();
for(int j = 0; j < compToAnswer.size(); j++)
for(unsigned int j = 0; j < compToAnswer.size(); j++)
{
compToAnswer.at(j).increaseDecay();
}
for(int j = 0 ; j < current.teachedCompetenciesWeighted().size() ; j++)
for(unsigned int j = 0 ; j < current.teachedCompetenciesWeighted().size() ; j++)
{
currentComp = current.teachedCompetenciesWeighted().at(j).first;
......@@ -63,7 +65,7 @@ std::pair<bool, double> ConstraintsProfession::integrityCheck(Cursus indiv)
}
}
for(int i = 0; i < compToAnswer.size(); i++)
for(unsigned int i = 0; i < compToAnswer.size(); i++)
{
compToAnswer.at(i).saveDecay();
//std::cout << compToAnswer.at(i) << std::endl;
......@@ -72,16 +74,26 @@ std::pair<bool, double> ConstraintsProfession::integrityCheck(Cursus indiv)
//Now that we have evolve all the tmp competency, we compate their mag to the requirement. We count how many is not met to define the metric
int score = 0;
for(int i = 0; i < this->_job.prerequisites().size(); i++)
double magDiff = 0; // addendum from HL
for(unsigned int i = 0; i < this->_job.prerequisites().size(); i++)
{
if(compToAnswer.at(i).magnitude().value() < this->_job.prerequisites().at(i).c_magnitude().value())
{
score++;
magDiff += ( this->_job.prerequisites().at(i).c_magnitude().value() - compToAnswer.at(i).magnitude().value() ) / this->_job.prerequisites().at(i).c_magnitude().value() ;
}
}
//std::cout << "Score: " << std::to_string(score) << std::endl;
//std::cout << "Size: " << std::to_string(compToAnswer.size()) << std::endl;
bool res = score == 0;
return std::pair<bool, double>(res, 1 - ( (double)score / (double)compToAnswer.size()));
switch (ConstraintsProfession::DISCRETE_METRIC) //whether we use discrete or continue metrics
{
case 0:
return std::pair<bool, double>(res, 1 - ( magDiff / (double)compToAnswer.size() ) );
default:
return std::pair<bool, double>(res, 1 - ( (double)score / (double)compToAnswer.size()));
}
}
......@@ -22,6 +22,8 @@ class ConstraintsProfession
Profession _job;
public:
static int DISCRETE_METRIC;
ConstraintsProfession(const CSDVP & csdvp, const Profession & job)
: _pb(csdvp), _job(job) {}
......
......@@ -6,9 +6,9 @@ std::pair<bool, double> ConstraintsRepetition::integrityCheck(Cursus indiv)
{
int nbOfRepetition = 0;
for(int i = 0; i < indiv.size(); i++)
for(unsigned int i = 0; i < indiv.size(); i++)
{
for(int j = i+1; j < indiv.size(); j++)
for(unsigned int j = i+1; j < indiv.size(); j++)
{
if(indiv.at(i) == indiv.at(j))
nbOfRepetition++;
......
......@@ -53,6 +53,15 @@ Course::Course(int id, int ects, std::string name)
// === GETTER
//cf. course.h
// Returns the greatest time frame in which this course is available.
// Returns -1 when the course has no temporal availability yet, instead of
// reading _temporalAvailability[0] on an empty vector (undefined behavior).
const int Course::lastTimeFrame() const
{
    if(this->_temporalAvailability.empty())
        return -1; // sentinel: no time frame assigned

    int max = this->_temporalAvailability[0];
    for(unsigned int i = 1; i < this->_temporalAvailability.size(); i++)
        if(max < this->_temporalAvailability[i])
            max = this->_temporalAvailability[i];
    return max;
}
// === END GETTER
// === MUTATOR
......
......@@ -81,6 +81,7 @@ class Course
std::vector<Competency>& unlocked_prerequisites() {return this->_prerequisites;}
const std::vector<int> timeFrame() const {return this->_temporalAvailability;}
const std::vector<std::pair<Competency, double>> teachedCompetenciesWeighted() const{return this->_weightedTeached;}
const int lastTimeFrame() const;
// === MUTATOR
// SETTER
......
......@@ -19,6 +19,7 @@ class DecayEngine
if(t == 0)
return 0;
return (exp(t / 1.25) + 5)/100;
//return (double)t/10;
}
};
......
......@@ -26,6 +26,7 @@ void CursusEval::operator()(Cursus & _cursus){
resCP=cp.integrityCheck(_cursus);
resCR=cr.integrityCheck(_cursus);
resCPR=cpr.integrityCheck(_cursus);
/*
std::cout << "EVAL: ";
std::cout << resCE.first << " " << resCE.second << std::endl;
......
......@@ -34,9 +34,9 @@ class eoInitConstraintCSDVP: public eoInit<EOT>
sizeTF=chromSize/nbTF;
catalogue = pb.coursesCatalogue();
possibleIDbyTF.resize(nbTF);
for(int i=0; i<maxVal; i++){
for(unsigned int i=0; i<maxVal; i++){
tmp=catalogue[i].timeFrame();
for(int j=0; j<tmp.size(); j++){
for(unsigned int j=0; j<tmp.size(); j++){
possibleIDbyTF[tmp[j]-pb.cfg_minimalTimeFrame()].push_back(i);
}
}
......@@ -50,13 +50,13 @@ class eoInitConstraintCSDVP: public eoInit<EOT>
}
virtual void operator()(EOT& chrom){
int cpt=0;
unsigned int cpt=0;
//std::cout << "Enter init" << std::endl;
unsigned int r=eo::rng.random(possibleIDbyTF[0].size());
chrom.resize(0);
chrom.push_back(possibleIDbyTF[0][r]);
//std::cout << "push " << possibleIDbyTF[0][r] << std::endl;
for(int i = 1; i < chromSize; i++){
for(unsigned int i = 1; i < chromSize; i++){
cpt=0;
r=eo::rng.random(possibleIDbyTF[i/sizeTF].size());
while(!notin(chrom, possibleIDbyTF[i/sizeTF][r], i) && cpt<maxVal){
......
......@@ -24,8 +24,8 @@ class mutCSDVP: public eoMonOp<EOT>
//_CourseID subastraction from _chrom
void notin(std::vector<int>& _courseID, std::vector<int>& _chrom){
int tmp;
for (int i=0; i<_chrom.size(); i++){
unsigned int tmp;
for (unsigned int i=0; i<_chrom.size(); i++){
tmp=0;
while(tmp<_courseID.size() && _chrom[i]!=_courseID[tmp])
tmp++;
......@@ -230,7 +230,7 @@ class mutCSDVP: public eoMonOp<EOT>
bool changedTF = false;
int currentTF = 0;
for(int i = 0; i < _chrom.size() && (i / nbCbyTF < TF); i++)
for(unsigned int i = 0; i < _chrom.size() && (i / nbCbyTF < TF); i++)
{
currentCourse = catalogue.at(_chrom.at(i));
......@@ -242,13 +242,13 @@ class mutCSDVP: public eoMonOp<EOT>
if(changedTF) //if we have changed of tf, lets improve decay for all comp
{
for(int j = 0; j < tmpCourse.prerequisites().size(); j++)
for(unsigned int j = 0; j < tmpCourse.prerequisites().size(); j++)
{
tmpCourse.unlocked_prerequisites().at(j).increaseDecay();
}
}
for(int j = 0; j < currentCourse.teachedCompetenciesWeighted().size(); j++)
for(unsigned int j = 0; j < currentCourse.teachedCompetenciesWeighted().size(); j++)
{
tmpComp = currentCourse.teachedCompetenciesWeighted().at(j).first;
addStatus = tmpCourse.addPrerequisite(tmpComp);
......@@ -275,7 +275,7 @@ class mutCSDVP: public eoMonOp<EOT>
}
compStatus = tmpCourse.prerequisites();
for(int i = 0; i < compStatus.size(); i++)
for(unsigned int i = 0; i < compStatus.size(); i++)
compStatus.at(i).saveDecay();
return compStatus;
......@@ -292,7 +292,7 @@ class mutCSDVP: public eoMonOp<EOT>
Competency checkCmp;
for(int i = 0; i < availableC.size(); i++)
for(unsigned int i = 0; i < availableC.size(); i++)
{
isPrereqOK = true;
......
......@@ -5,9 +5,11 @@
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <queue>
#include <tuple>

#include "problem.h"
#include "tools.h"
#include "competencyDistribution.h"

#include "exception/csdvpOverlapingBoundaryException.h"
#include "exception/notImplementedException.h"
......@@ -76,6 +78,8 @@ int CSDVP::CSDVP_COUNTER = 0;
{
this->_maximalMagnitude = Magnitude::build(m);
}
// Sets the threshold used when assigning prerequisites with HLevel: below it
// the max HLevel is used instead of the min (cf. "minMaxHLevelPrq" CLI param).
void CSDVP::set_cfg_thresholdHLevelMaxOverMin(int thr)
{this->_thresholdMinMaxHLevel = thr;}
void CSDVP::setTimeFrames(std::vector<int> & v)
{this->_timeFrames = v;}
......@@ -83,6 +87,7 @@ int CSDVP::CSDVP_COUNTER = 0;
{this->_availableCourses = c;}
void CSDVP::setCompetenciesCatalogue(std::vector<Competency> & c)
{this->_availableCompentecies = c;}
// ADDER
bool CSDVP::addTimeFrame(int tF)
{
......@@ -187,6 +192,16 @@ int CSDVP::CSDVP_COUNTER = 0;
return i;
return -1;
}
/** Maps a competency into its position inside this->competencyCatalogue().
 * Returns the index of the competency within the catalogue [0;size[, or -1
 * if the competency is not found.
 * The catalogue is fetched once and bound locally: the getter is no longer
 * re-invoked (and its result potentially re-copied) at every loop iteration.
 */
int CSDVP::mapCompToPosition(const Competency & comp)
{
    const auto & catalogue = this->competencyCatalogue();
    for(unsigned int i = 0; i < catalogue.size(); i++)
    {
        if(comp == catalogue.at(i))
            return i;
    }
    return -1;
}
// === END FUNC
// === STATIC
......@@ -349,6 +364,22 @@ int CSDVP::CSDVP_COUNTER = 0;
pb.addCompetencyToCatalogue(c);
assert(c == pb.competencyCatalogue().at(pb.competencyCatalogue().size()-1));
}
/* Creating _distributedCompetencies array for assignation */
for(unsigned int i = 0; i < pb.competencyCatalogue().size(); i++)
{
pb._distributedCompetencies.push_back(-1);
}
assert (pb._distributedCompetencies.size() == pb.competencyCatalogue().size());
/* Assigning Hierachy Level (HL) for each comp
* HL is used to improve the average quality of the course catalogue compared to random
*/
CompetencyDistribution distr = CompetencyDistribution();
//distr.linearDistribution(pb);
distr.distribute(pb);
/* COMPETENCY ASSIGNATION FOR TEACHED
* For each course c, we roll x, the nb of competencies associated to c.
* To assign a competency to c exhaustively, we create a tmp competency vector v, where the competencies are randomly sorted, then create a queue from it.
......@@ -359,7 +390,17 @@ int CSDVP::CSDVP_COUNTER = 0;
std::queue<Competency> queue;
for(unsigned int i = 0 ; i < randomVec.size(); i++)
queue.push(randomVec.at(i));
/*
* ADDENDUM:
* We use the HierarchyLevel to condition the assignation of competency.
* A course at level i (defined by its biggest TF) cannot have a comp with HL > i*HLrange / NbTF
*/
int lastTF;
int nbTF = pb.timeFrames().size();
int hLevelR = CompetencyDistribution::HLevelRange(pb);
int maxLevel; //used to identify which HL are authorized;
std::vector<Competency> HLComp;
int x;
Competency tmpComp;
std::pair<Competency, double> teachedComp;
......@@ -367,54 +408,190 @@ int CSDVP::CSDVP_COUNTER = 0;
for(unsigned int i = 0; i < pb.coursesCatalogue().size(); i++)
{
x = _randomizeIn(pb.cfg_competencyByCourseMin(), pb.cfg_competencyByCourseMax());
for(int j = 0; j < x; j++)
lastTF = pb.coursesCatalogue().at(i).lastTimeFrame();
maxLevel = lastTF * hLevelR / nbTF;
HLComp = CompetencyDistribution::unassignedUpToHLevel(pb, maxLevel);
if(HLComp.size() == 0) //if there is no more HL unassigned, it doesnot matter which one we take
HLComp = CompetencyDistribution::upToHLevel(pb, maxLevel);
std::random_shuffle(HLComp.begin(), HLComp.end());
// std::cout << "SIZE OF HLCOMP : " << HLComp.size() << std::endl;
for(unsigned int j = 0; j < x && HLComp.size() > 0 && j < HLComp.size(); j++)
{
tmpComp = queue.front();
queue.pop();queue.push(tmpComp);
magVal = pb.cfg_magnitudeMin().value() + ( (double)rand()/RAND_MAX) * ( pb.cfg_magnitudeMax().value() - pb.cfg_magnitudeMin().value()) ;
Competency cpt = Competency::build(magVal,tmpComp.c_name());
teachedComp = std::pair<Competency,double>(cpt, 1.0);
pb.unlocked_coursesCatalogue().at(i).addTeachedComp(teachedComp);
// tmpComp = queue.front();
tmpComp = HLComp.at(j);
pb._sourceCourseTeachedComp(pb, i, tmpComp);
}
}
/* COMPETENCY ASSIGNATION FOR PREREQ
* IDEM AS ABOVE
*/
/*
* ADDENDUM :
* Specific behavior for cour level 0: no prereq
* Prereq for level i only draw from HL-1
*
* WARNING :
* Such a behavior induces a strong hypothesis on how courses draw their comp!
*
*/
/*
* NEXT TO DO :
* Array of boolean : true comp assignée ; évolution un tableau de -1 si pas assigné et [0;1] pour les magn des comp à chaque case puis assigné decay sur l'ensemble du tab -> utile pour borner les mag max du job
* POur prereq juste prendre les comp a true
* +
* Vecteur pour le HLevel en entree en % [20,30,10,10,30] (x cases) a passer en dur
* +
* Comp prereq metric comme job metric continue
*/
std::random_shuffle(randomVec.begin(), randomVec.end());
queue = std::queue<Competency>();
for(unsigned int i = 0 ; i < randomVec.size(); i++)
queue.push(randomVec.at(i));
int firstTF = 0; int randVal = 0;
//int threshold = 30;
for(unsigned int i = 0; i < pb.coursesCatalogue().size(); i++)
{
x = _randomizeIn(pb.cfg_prerequisiteByCourseMin(), pb.cfg_prerequisiteByCourseMax());
for(int j = 0; j < x; j++)
lastTF = pb.coursesCatalogue().at(i).lastTimeFrame();
firstTF = pb.coursesCatalogue().at(i).timeFrame().at(0);
randVal = rand() % (100);
if( x < pb.cfg_thresholdHLevelMaxOverMin())
maxLevel = lastTF * hLevelR / nbTF;
else
maxLevel = firstTF * hLevelR / nbTF;
//maxLevel = lastTF * hLevelR / nbTF;
maxLevel--; // Logically, prerequisite can only be according to comp of lower HL
if(maxLevel >= 0) // then this means we are dealing with at least a HL 1, so only HL below can serve as prereq
{
tmpComp = queue.front();
queue.pop();
//we change mag value for prereq
magVal = pb.cfg_magnitudeMin().value() + ( (double)rand()/RAND_MAX) * ( pb.cfg_magnitudeMax().value() - pb.cfg_magnitudeMin().value()) ;
Competency cpt = Competency::build(magVal,tmpComp.c_name());
pb.unlocked_coursesCatalogue().at(i).addPrerequisite(cpt);
queue.push(tmpComp);
HLComp = CompetencyDistribution::upToHLevel(pb, maxLevel);
std::random_shuffle(HLComp.begin(), HLComp.end());
for(unsigned int j = 0; j < x && HLComp.size() > 0 && j < HLComp.size(); j++)
{
tmpComp = HLComp.at(j);
//we change mag value for prereq
magVal = pb.cfg_magnitudeMin().value() + ( (double)rand()/RAND_MAX) * ( pb.cfg_magnitudeMax().value() - pb.cfg_magnitudeMin().value()) ;
Competency cpt = Competency::build(magVal,tmpComp.c_name());
pb.unlocked_coursesCatalogue().at(i).addPrerequisite(cpt);
queue.push(tmpComp);
}
}
}
}
// Builds a new competency carrying c's name with a magnitude drawn uniformly
// in [cfg_magnitudeMin ; cfg_magnitudeMax], registers it (weight 1.0) as
// teached by the course at index idx of the catalogue, and keeps
// _distributedCompetencies synchronized through _updateDistribComp.
void CSDVP::_sourceCourseTeachedComp(CSDVP & pb, unsigned int idx, Competency & c)
{
    const double minMag = pb.cfg_magnitudeMin().value();
    const double maxMag = pb.cfg_magnitudeMax().value();
    const double drawnMag = minMag + ( (double)rand()/RAND_MAX) * (maxMag - minMag);

    Competency sourced = Competency::build(drawnMag, c.c_name());
    std::pair<Competency, double> weighted(sourced, 1.0);
    pb.unlocked_coursesCatalogue().at(idx).addTeachedComp(weighted);
    _updateDistribComp(pb, sourced);
}
// Accumulates cpt's magnitude into _distributedCompetencies, the tracker
// index-aligned with the competency catalogue. A slot still holding the -1
// sentinel ("never assigned") is first rebased to 0 so magnitudes accumulate
// from a neutral value.
void CSDVP::_updateDistribComp(CSDVP & pb, Competency & cpt)
{
    const int pos = pb.mapCompToPosition(cpt);
    assert (pos >= 0); // cpt must belong to the catalogue

    double & slot = pb._distributedCompetencies.at(pos);
    if(slot == -1) // sentinel: not yet assigned, start from 0
        slot = 0;
    slot += cpt.magnitude().value();
    // No clamping to 1 here: overflow is not an issue since the tracker only
    // measures how much magnitude has been poured into a competency overall.
}
// --------- END GENERATION RELATED FUNCTIONS ---------
// === END STATIC
// Debug helper: prints the competency distribution tracker together with the
// summary statistics computed by distributionStats(). Output is unchanged.
void const CSDVP::displayDistribution(){
    std::cout << "------------------------------" << std::endl
              << "| PB Competency distribution |" << std::endl
              << "------------------------------" << std::endl;

    std::tuple<int, int, double, double> stats = this->distributionStats();
    std::cout << "Stats: " << std::endl
              << "\n\t# of Comp: " << this->_distributedCompetencies.size()
              << "\n\t# of Unassigned: " << std::get<0>(stats)
              << "\n\t# of above 0.5: " << std::get<1>(stats)
              << "\n\tDistrib mean: " << std::get<2>(stats)
              << "\n\tDistrib median: " << std::get<3>(stats) << std::endl;

    // Per hierarchy level: how many competencies got assigned in it.
    for(unsigned int lvl = 0; lvl < CompetencyDistribution::HLEVEL.size(); lvl++)
        std::cout << "\tAssigned in HL#" << lvl << ": " << CompetencyDistribution::getHLevel(*this, lvl).size() << std::endl;

    // Raw distribution array, '|'-separated (-1 marks an unassigned slot).
    std::cout << "Distrib:" << std::endl << "[";
    for(unsigned int k = 0; k < this->_distributedCompetencies.size(); k++)
    {
        if(k > 0)
            std::cout << "|";
        std::cout << this->_distributedCompetencies.at(k);
    }
    std::cout << "]" << std::endl
              << "------------------------------" << std::endl << std::endl;
}
/* Retrieves some stats regarding the comp distrib.
 * First element is the nb of unassigned comp (slots still at -1)
 * Second element is the nb of assigned comp with value >= 0.5
 * Third element is the mean (all unassigned elm excluded); 0 when nothing
 *   is assigned (the original divided by zero, yielding NaN)
 * Fourth element is the median (idem); -1 when nothing is assigned
 */
std::tuple<int, int, double, double> CSDVP::distributionStats()
{
    std::tuple<int, int, double, double> stats;
    int unassigned = 0;
    int aboveFive = 0; // counts assigned values >= 0.5
    double mean = 0;
    double median = -1; // stays -1 when no value is assigned

    std::vector<double> assignedVals; // assigned values only, for the median
    for(unsigned int i = 0; i < this->_distributedCompetencies.size(); i++)
    {
        double v = this->_distributedCompetencies.at(i);
        if(v == -1)
            unassigned++;
        else
        {
            mean += v;
            assignedVals.push_back(v);
            if(v >= 0.5)
                aboveFive++;
        }
    }

    if(assignedVals.empty())
        mean = 0; // guard: avoids 0/0 (NaN) when every slot is still -1
    else
    {
        mean = mean / (double)assignedVals.size();
        // median over the assigned values (resolves the former @todo)
        std::sort(assignedVals.begin(), assignedVals.end());
        unsigned int mid = assignedVals.size() / 2;
        if(assignedVals.size() % 2 == 1)
            median = assignedVals.at(mid);
        else
            median = (assignedVals.at(mid - 1) + assignedVals.at(mid)) / 2.0;
    }

    std::get<0>(stats) = unassigned;
    std::get<1>(stats) = aboveFive;
    std::get<2>(stats) = mean;
    std::get<3>(stats) = median;
    return stats;
}
// === OPERATOR
std::ostream & operator<<(std::ostream & Stream, const CSDVP & c)
{
std::string s = "--------------\n| Problem n°"+std::to_string(c.id())+"|\n---------------\n| Configuration:";
s+= "\n\tseed: "+std::to_string(c.seed())+"\n\tNb comp: "+std::to_string(c.cfg_quantityCompetencies())+"\n\tNb courses: "+std::to_string(c.cfg_quantityCourses())+"\n\tMin TimeF: "+std::to_string(c.cfg_minimalTimeFrame())+"\n\tMax TimeF: "+std::to_string(c.cfg_maximalTimeFrame());
s+= "\n\tECTS Min: "+std::to_string(c.cfg_ectsMin())+"\n\tECTS Max: "+std::to_string(c.cfg_ectsMax())+"\n\tCourse by TF min: "+std::to_string(c.cfg_courseByTFMin())+"\n\tCourse by TF max: "+std::to_string(c.cfg_courseByTFMax());
s+="\n\tMagnitude min: "+std::to_string(c.cfg_magnitudeMin().value())+"\n\tMagnitude max: "+std::to_string(c.cfg_magnitudeMax().value());
std::string s = "--------------\n| Problem n°"+std::to_string(c.id())+"|\n--------------\n| Configuration:";
s+= "\n|\tseed: "+std::to_string(c.seed())+"\n|\tNb comp: "+std::to_string(c.cfg_quantityCompetencies())+"\n|\tNb courses: "+std::to_string(c.cfg_quantityCourses())+"\n|\tMin TimeF: "+std::to_string(c.cfg_minimalTimeFrame())+"\n|\tMax TimeF: "+std::to_string(c.cfg_maximalTimeFrame());
s+= "\n|\tECTS Min: "+std::to_string(c.cfg_ectsMin())+"\n|\tECTS Max: "+std::to_string(c.cfg_ectsMax())+"\n|\tCourse by TF min: "+std::to_string(c.cfg_courseByTFMin())+"\n|\tCourse by TF max: "+std::to_string(c.cfg_courseByTFMax());
s+="\n|\tMagnitude min: "+std::to_string(c.cfg_magnitudeMin().value())+"\n|\tMagnitude max: "+std::to_string(c.cfg_magnitudeMax().value());
s+= "\n| Detail:\n";
Stream << s;
std::vector<Course> courses = c.coursesCatalogue();
for(unsigned int i = 0; i < courses.size(); i++)
Stream << courses[i] << "\n";
Stream << "===Competencies:";
std::vector<Competency> comp = c.competencyCatalogue();
for(unsigned int i = 0; i < comp.size(); i++)
Stream << comp[i] << "\n";
return Stream;
}
// === END OPERATOR
......@@ -2,6 +2,7 @@
#define SRC_PROBLEM_H_
#include <vector>
#include <tuple>
#include "course.h"
#include "competency.h"
......@@ -48,6 +49,8 @@ class CSDVP
Magnitude _minimalMagnitude;
Magnitude _maximalMagnitude;
int _thresholdMinMaxHLevel; // used for rand in prereq assign with HLevel
// ---------- END CONFIGURATION ATTRIBUTES ----------
// ---------- PROBLEM SPECIFIC ATTRIBUTES ----------
......@@ -56,7 +59,11 @@ class CSDVP
std::vector<Course> _availableCourses;
std::vector<std::vector<Course>> _coursesSortedByTF; //sorted by standard index. e.g. TF[4;6] -> [0]=4; [1]=5 ; [2] = 6
std::vector<Competency> _availableCompentecies; //The competency's magnitude should not be used here.
// This array is index aligned with the compCatalogue of the problem. It represents the current compentecies distrib in the pb. Mostly used during configuratioin.
// -1 : not assigned ; [0;1] the ith comp at the indice i in the cmpCatalogue as been assigned, with a value of x € [0;1].
std::vector<double> _distributedCompetencies;
///@todo implements a decay politics
//DecayPolitics
// --------- END PROBLEM SPECIFIC ATTRIBUTES ---------
......@@ -81,6 +88,10 @@ class CSDVP
/// It sources _coursesSortedByTF, which is another view of _availableCourses, sorted by TF
void _makeCoursesSortedByTF();
// This fuction creates a new tmpComp with mag and add it the comp teached by the course idx of the catalogue
// Using the sourcing function keeps the _distributedCompetencies up to date
void _sourceCourseTeachedComp(CSDVP & pb, unsigned int idx, Competency & c);
void _updateDistribComp(CSDVP & pb, Competency & cpt);
public:
// --------- GENERATION RELATED FUNCTION ---------
/// allows a random attribution of pb's attributes
......@@ -117,6 +128,7 @@ class CSDVP
int cfg_prerequisiteByCourseMin() const {return this->_minimalPrerequisiteByCourse;}
int cfg_prerequisiteByCourseMax() const {return this->_maximalPrerequisiteByCourse;}
int cfg_pickedCoursesByTimeFrame() const {return this->_pickedCoursesByTimeFrame;}
int cfg_thresholdHLevelMaxOverMin() const {return this->_thresholdMinMaxHLevel;}
const Magnitude & cfg_magnitudeMin() const{return this->_minimalMagnitude;}
const Magnitude & cfg_magnitudeMax() const{return this->_maximalMagnitude;}
......@@ -130,7 +142,8 @@ class CSDVP
std::vector<int> & unlocked_timeFrames(){return this->_timeFrames;}
std::vector<Course> & unlocked_coursesCatalogue(){return this->_availableCourses;}
std::vector<Competency> & unlocked_competenciesCatalogue(){return this->_availableCompentecies;}
std::vector<double> & unlocked_distributedCompetencies(){return this->_distributedCompetencies;}
int getQuantityCoursesToPick() const{
if(this->_isConfig)
return this->_timeFrames.size() * this->_pickedCoursesByTimeFrame;
......@@ -140,8 +153,22 @@ class CSDVP
* returns the index of the course within the coursesCatalogue [0;size[ ; otherwise return -1 if the course is not found.
*/
int mapCourseToPosition(const Course & c);
/** Maps a competency into its position inside the this->competencyCatalogue().
* returns the index of the competency within the competencyCatalogue [0;size[ ; otherwise return -1 if the competency is not found.
*/
int mapCompToPosition(const Competency & comp);
///@todo getDecayPolitic
// === Competency Distribution related
void const displayDistribution();
/* Retrieves some stats regarding the comp distrib
* First element is the nb of unassigned comp
* Second element is the nb of comp above 0.5
* Third element is the mean (all unassigned elm excluded)
* Fourth element is the median (idem)
*/
std::tuple<int, int, double, double> distributionStats();
// === MUTATOR
// SETTER
/*setSeed is deactivated. The only way to attribute a seed to CSDVP is by generateProblem.*/
......@@ -161,6 +188,7 @@ class CSDVP
void set_cfg_minimalPrerequisiteByCourse(int nb);
void set_cfg_maximalPrerequisiteByCourse(int nb);
void set_cfg_pickedCoursesByTimeFrame(int nb);
void set_cfg_thresholdHLevelMaxOverMin(int thr);
void setTimeFrames(std::vector<int> & v);
void setCoursesCatalogue(std::vector<Course> &);
......
......@@ -6,10 +6,12 @@
#include "profession.h"
#include "competency.h"
#include "competencyDistribution.h"
#include "exception/JobOverlappingBoundariesException.h"
int Profession::PROFESSION_COUNTER = 0;
unsigned int Profession::JOB_SELECTION_TYPE = 0;
// === FACTORY
// No factory needed
......@@ -170,21 +172,52 @@ void Profession::_randomlyGenerate(Profession & job, CSDVP & pb)
std::random_shuffle(tmpComp.begin(), tmpComp.end());
int i;
for(i = 0; i < (int)tmpComp.size() && i < howManyPrereq; i++)
std::vector<Competency> compHigherHL;
switch (Profession::JOB_SELECTION_TYPE)
{
magVal = job.cfg_minimalMagnitude().value() + ( (double)rand()/RAND_MAX) * ( job.cfg_maximalMagnitude().value() - job.cfg_minimalMagnitude().value()) ;
ctmp = Competency::buildTMP(magVal,tmpComp.at(i).name());
case 1: //at least one comp in the higher HL
compHigherHL = CompetencyDistribution::getHLevel(pb, CompetencyDistribution::HLevelRange(pb)-1);
// std::cout << "compHigherHL size :" << compHigherHL.size() << std::endl;
std::random_shuffle(compHigherHL.begin(), compHigherHL.end());
assert(compHigherHL.size() > 0); //if no comp retrieved in the higher hlevel (hhl), there is a pb here !
magVal = job.cfg_minimalMagnitude().value() + ( (double)rand()/RAND_MAX) * ( job.cfg_maximalMagnitude().value() - job.cfg_minimalMagnitude().value()) ;
ctmp = Competency::buildTMP(magVal,compHigherHL.at(0).name());
ctmp.setHL(compHigherHL.at(0).hLevel());
job.addPrerequisite(ctmp);
}
if(i != howManyPrereq) //Warning need to check if still in range because not enought courses
{
if(i < job.cfg_minimalPrerequisites() || i > job.cfg_maximalPrerequisites())
// !! No duplicata protection: we can insert another time the competency above from the HHL
for(i = 0; i < (int)tmpComp.size() && i < howManyPrereq-1; i++) //cp/paste from default case
{
magVal = job.cfg_minimalMagnitude().value() + ( (double)rand()/RAND_MAX) * ( job.cfg_maximalMagnitude().value() - job.cfg_minimalMagnitude().value()) ;
ctmp = Competency::buildTMP(magVal,tmpComp.at(i).name());
ctmp.setHL(tmpComp.at(i).hLevel());
job.addPrerequisite(ctmp);
}
break;
case 2: // emphasis on higher HL
_pickWithHLWeighting(howManyPrereq, job, pb);
break;
default: //classic behavior
for(i = 0; i < (int)tmpComp.size() && i < howManyPrereq; i++)
{
//considering as a fail during generation
assert(i < job.cfg_minimalPrerequisites() || i > job.cfg_maximalPrerequisites());
magVal = job.cfg_minimalMagnitude().value() + ( (double)rand()/RAND_MAX) * ( job.cfg_maximalMagnitude().value() - job.cfg_minimalMagnitude().value()) ;
ctmp = Competency::buildTMP(magVal,tmpComp.at(i).name());
ctmp.setHL(tmpComp.at(i).hLevel());
job.addPrerequisite(ctmp);
}
if(i != howManyPrereq) //Warning need to check if still in range because not enought courses
{
if(i < job.cfg_minimalPrerequisites() || i > job.cfg_maximalPrerequisites())
{
//considering as a fail during generation
assert(i < job.cfg_minimalPrerequisites() || i > job.cfg_maximalPrerequisites());
}
}
break;
}
// If ECTS is set to be random, then calculating it
......@@ -210,6 +243,49 @@ void Profession::_randomlyGenerate(Profession & job, CSDVP & pb)
job.setRequiredECTS(ects);
}
// Here we weight where we pick the comp, the higher the HL, more likely a comp is to be pick
// Picks nbToPick competencies as prerequisites of job, weighting the draw by
// hierarchy level (HL): interval boundaries grow as i*i, so the higher the HL,
// the more likely one of its competencies gets picked.
// Fixes: (1) the HL-search loop indexed range[currentHL] BEFORE checking
// currentHL < range.size() (out-of-bounds read if the invariant breaks);
// (2) an empty hierarchy (hLRange <= 0) made range empty and x % 1 index an
// empty vector — now guarded by an early return.
void Profession::_pickWithHLWeighting(int nbToPick, Profession & job, CSDVP & pb)
{
    const int hLRange = CompetencyDistribution::HLevelRange(pb);
    if(hLRange <= 0) // no hierarchy level available: nothing can be drawn
        return;

    // range[i] is the upper boundary of HL i's interval within [0;sumInterval]
    std::vector<int> range;
    int sumInterval = 0;
    for(int i = 0; i < hLRange ; i++)
    {
        sumInterval = i * i; // quadratic growth emphasizes high HLs
        range.push_back(sumInterval);
    }

    double magVal;
    Competency ctmp;
    std::vector<Competency> hlComp;

    for(int i = 0; i < nbToPick; i++)
    {
        int x = rand() % ( sumInterval + 1);
        assert(x <= sumInterval);

        // Locate the HL whose interval contains x — bounds check first.
        unsigned int currentHL = 0;
        while(currentHL < range.size() && x > range[currentHL])
        {
            currentHL++;
        }

        hlComp = CompetencyDistribution::getHLevel(pb, currentHL); // comps of that HL
        assert(hlComp.size() > 0);
        std::random_shuffle(hlComp.begin(), hlComp.end());

        // Prerequisite magnitude drawn uniformly in [minMag ; maxMag]
        magVal = job.cfg_minimalMagnitude().value() + ( (double)rand()/RAND_MAX) * ( job.cfg_maximalMagnitude().value() - job.cfg_minimalMagnitude().value()) ;
        ctmp = Competency::buildTMP(magVal,hlComp.at(0).name());
        ctmp.setHL(hlComp.at(0).hLevel());
        job.addPrerequisite(ctmp);
    }
}
// === STATIC
int Profession::assignID(){return ++Profession::PROFESSION_COUNTER;}
......
......@@ -40,12 +40,16 @@ class Profession
/** _duplicataProtection returns true if the value (2nd param) searched into (1st param) is found*/
bool _duplicataProtection(std::vector<Competency> *, Competency);
static void _pickWithHLWeighting(int nbToPick, Profession &, CSDVP &);
// Static
static int PROFESSION_COUNTER;
static int assignID();
static void _randomlyGenerate(Profession & job, CSDVP & pb);
public:
static unsigned int JOB_SELECTION_TYPE;
enum GenerationType
{
RANDOM
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment