diff --git a/eraser-base/build.gradle b/eraser-base/build.gradle
index 9005bfbca5082e4fb98c5e48dfae5f229251dd5b..1dac5837c258baf49677789feda63a80f85d2957 100644
--- a/eraser-base/build.gradle
+++ b/eraser-base/build.gradle
@@ -9,7 +9,7 @@ dependencies {
     compile group: 'net.sf.beaver', name: 'beaver-rt', version: '0.9.11'
     compile group: 'org.fusesource.mqtt-client', name: 'mqtt-client', version: '1.15'
     compile group: 'org.influxdb', name: 'influxdb-java', version: '2.15'
-    compile project(':feedbackloop.learner')
+//    compile project(':feedbackloop.learner')
     testCompile group: 'org.testcontainers', name: 'testcontainers', version: '1.11.2'
     testCompile group: 'org.testcontainers', name: 'influxdb', version: '1.11.2'
     testCompile group: 'org.apache.logging.log4j', name: 'log4j-slf4j-impl', version: '2.11.2'
diff --git a/eraser.starter/build.gradle b/eraser.starter/build.gradle
index 870459b22f43ec4ec98b478249d6feeb3533c102..4489b1862e9a74fcec4f8e2bb5c9ca1e9d4c7042 100644
--- a/eraser.starter/build.gradle
+++ b/eraser.starter/build.gradle
@@ -18,7 +18,7 @@ dependencies {
     compile project(':feedbackloop.analyze')
     compile project(':feedbackloop.plan')
     compile project(':feedbackloop.execute')
-//    compile project(':feedbackloop.learner')
+    compile project(':feedbackloop.learner')
     compile project(':feedbackloop.learner_backup')
     compile group: 'com.fasterxml.jackson.dataformat', name: 'jackson-dataformat-yaml', version: '2.9.8'
     compile group: 'com.fasterxml.jackson.core', name: 'jackson-databind', version: '2.9.8'
diff --git a/feedbackloop.learner/src/main/java/de/tudresden/inf/st/eraser/feedbackloop/learner/ActivityLearningHandler.java b/feedbackloop.learner/src/main/java/de/tudresden/inf/st/eraser/feedbackloop/learner/ActivityLearningHandler.java
index 1978586abadd2f28a630d04fe760cd595444a61c..5f03088dd8e9e156f4a21c4490697cb202edb98e 100644
--- a/feedbackloop.learner/src/main/java/de/tudresden/inf/st/eraser/feedbackloop/learner/ActivityLearningHandler.java
+++ b/feedbackloop.learner/src/main/java/de/tudresden/inf/st/eraser/feedbackloop/learner/ActivityLearningHandler.java
@@ -1,4 +1,5 @@
 package de.tudresden.inf.st.eraser.feedbackloop.learner;
+import de.tudresden.inf.st.eraser.feedbackloop.learner.LearnerHelper;
 import de.tudresden.inf.st.eraser.jastadd.model.*;
 
 import java.time.Instant;
@@ -6,22 +7,21 @@ import java.util.ArrayList;
 import java.util.List;
 
 /**
- * Adapter for internally held machine learning models.
+ * Adapter for accessing machine learning models that are used for activity recognition.
+ *
+ * @author MoralDav
+ * @author Bierzynski
  *
- * @author rschoene - Initial contribution
  */
 public class ActivityLearningHandler extends LearningHandler{
 
-  public ActivityLearningHandler setLearner(LearnerImpl learner) {
-
-    return (ActivityLearningHandler)super.setLearner(learner);
+  public ActivityLearningHandler(LearnerImpl learner, OpenHAB2Model openHAB2model){
+    super(learner, openHAB2model);
   }
 
-  public ActivityLearningHandler setModel(InternalMachineLearningModel model) {
-    return (ActivityLearningHandler)super.setModel(model);
-  }
+  // placeholder for activity-learning specific initialization (currently empty)
+  public void InitActivityLearning(){}
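+
+  // Illustrative usage sketch of the handler lifecycle (assumes a readily configured LearnerImpl
+  // and OpenHAB2Model; not part of this change):
+  //   ActivityLearningHandler handler = new ActivityLearningHandler(learner, openHAB2Model);
+  //   handler.newData(changedItems);            // keeps the model input vectors up to date
+  //   MachineLearningResult result = handler.classify();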
 
 
+  //get items that this model is supposed to change
   @Override
   public List<Item> getTargets() {
     List<Item> targets = new ArrayList<Item>();
@@ -32,6 +32,14 @@ public class ActivityLearningHandler extends LearningHandler{
     return targets ;
   }
 
+  /**
+   * Accesses the meta-information of the activity recognition Network object (managed by the
+   * Learner object) to determine the items whose current values the activity recognition
+   * machine-learning model needs for its decision making. The meta-information is read from a
+   * text file that maps each item id to the position within the input vector at which its value
+   * is expected.
+   *
+   * @return the items whose state is relevant for activity recognition
+   */
   @Override
   public List<Item> getRelevantItems() {
     List<Item> relevantItems = new ArrayList<Item>();
@@ -47,17 +55,25 @@ public class ActivityLearningHandler extends LearningHandler{
     this.getLogger().debug("Ignored training trigger.");
   }
 
-  //classify using the input vector given by newData
-   @Override
+  /**
+   * Triggers the Learner object to estimate the current activity from the sensor data held in
+   * the activity learning input vector (kept up to date by LearningHandler.newData). The obtained
+   * result is wrapped in an ItemPreference and an ExternalMachineLearningResult.
+   *
+   * @return the estimated current activity, wrapped as a machine learning result
+   */
+  @Override
   public MachineLearningResult classify() {
      List<Item> targets = getTargets();//get items that this model is supposed to change
     //Using activity recognition to get current activity
-     ActivityItem activity_item = (ActivityItem) targets.get(0);
+
+     ActivityItem activity_item = (ActivityItem) targets.get(LearnerHelper.IDX_ACTIVITY_ITEM_TARGET);
 
 
     //prepare output
     List<ItemPreference> preferences = new ArrayList<ItemPreference>();
-    ItemPreference preference = getPreferenceItem(activity_item, new double[] {this.getLearner().getActivity()});
+    double[] activity = new double[] {this.getLearner().getActivity()};
+    ItemPreference preference = wrapIntoPreferenceItem(activity_item, activity, LearnerHelper.ID_ACTIVITY_MODEL);
     preferences.add(preference);//add preference to the preferences array
 
 
diff --git a/feedbackloop.learner/src/main/java/de/tudresden/inf/st/eraser/feedbackloop/learner/LearnerHelper.java b/feedbackloop.learner/src/main/java/de/tudresden/inf/st/eraser/feedbackloop/learner/LearnerHelper.java
index f23b58105b78ea6b84a3f11211e843eb28f2cf13..232f0b2c4a978b0f51038c30a49e73099086fe6a 100644
--- a/feedbackloop.learner/src/main/java/de/tudresden/inf/st/eraser/feedbackloop/learner/LearnerHelper.java
+++ b/feedbackloop.learner/src/main/java/de/tudresden/inf/st/eraser/feedbackloop/learner/LearnerHelper.java
@@ -29,7 +29,9 @@ public class LearnerHelper {
   private static DoubleArrayDoubleFunction sigmoid = inputs -> Math.signum(Arrays.stream(inputs).sum());
   private static DoubleArrayDoubleFunction tanh = inputs -> Math.tanh(Arrays.stream(inputs).sum());
   private static DoubleArrayDoubleFunction function_one = inputs -> 1.0;
-
+  // shared model identifiers and index constants used by the learning handlers
+  public static final int ID_ACTIVITY_MODEL = 0;
+  public static final int ID_PREFERENCE_MODEL = 1;
+  public static final int IDX_ACTIVITY_ITEM_TARGET = 0;
+
   public static NeuralNetworkRoot transform(EncogModel encogModel) {
     NeuralNetworkRoot result = NeuralNetworkRoot.createEmpty();
     List<Double> weights = encogModel.getWeights();
diff --git a/feedbackloop.learner/src/main/java/de/tudresden/inf/st/eraser/feedbackloop/learner/LearnerImpl.java b/feedbackloop.learner/src/main/java/de/tudresden/inf/st/eraser/feedbackloop/learner/LearnerImpl.java
index 27ef0067449557a88d45ba3452a3aeb1cc0d1d15..b663e944807cd574fe3853b8ad7051dea1f85bd0 100644
--- a/feedbackloop.learner/src/main/java/de/tudresden/inf/st/eraser/feedbackloop/learner/LearnerImpl.java
+++ b/feedbackloop.learner/src/main/java/de/tudresden/inf/st/eraser/feedbackloop/learner/LearnerImpl.java
@@ -16,17 +16,18 @@ import org.encog.util.arrayutil.NormalizedField;
 import org.encog.util.csv.CSVFormat;
 import org.encog.util.csv.ReadCSV;
 
-import java.io.File;
-import java.io.InputStream;
+import java.io.*;
 import java.net.URL;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.*;
 
 /**
  * Implementation of the Learner.
  *
  * @author Bierzyns - Initial contribution
+ * @author MoralDav
  */
 public class LearnerImpl implements Learner {
 
@@ -38,8 +39,44 @@ public class LearnerImpl implements Learner {
   private CSVFormat format = new CSVFormat('.', ',');
   private Map<Integer, Dataset> datasets = new HashMap<>();
   private Map<Integer, Network> models = new HashMap<>();
+
+  // maps a model id to its input vector, i.e., to the vector of sensor values/activities that
+  // this model is applied to when LearnerImpl.computeResult is called
   private Map<Integer, List<Double>> inputVectors = new HashMap<>();
 
+
+  /**
+   * Reads an item-id-to-index mapping from a properties file.
+   *
+   * @param stringRelativePath relative path to the properties file
+   * @return map from item id to its position in the input or output vector
+   */
+  public Map<String, Integer> readMapFromFile(String stringRelativePath){
+    // load the properties file that maps item ids to vector positions
+    Properties properties = new Properties();
+    Path absolutePath = Paths.get(stringRelativePath).toAbsolutePath();
+    try (FileInputStream inputStream = new FileInputStream(absolutePath.toFile())) {
+      properties.load(inputStream);
+    } catch (IOException e) {
+      logger.error("Could not read property file '{}'", absolutePath, e);
+    }
+    Map<String, Integer> map = new HashMap<>();
+    for (final String itemId: properties.stringPropertyNames())
+      map.put(itemId, Integer.parseInt(properties.getProperty(itemId)));
+    return map;
+  }
+
+  /**
+   * Loads the input and output index mappings for the given model from the two properties files
+   * and registers them with the corresponding Network object.
+   */
+  public void InitModelsVectors(String fileInputVector, String fileOutputVector, int modelId){
+    Network network = models.get(modelId);
+    Map<String, Integer> mapInput = readMapFromFile(fileInputVector);
+    network.setIndexInputvector(mapInput);
+    Map<String, Integer> mapOutput = readMapFromFile(fileOutputVector);
+    network.setIndexOutputvector(mapOutput);
+  }
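+
+  // Illustrative sketch (file names and item ids are assumptions, not part of this change): the
+  // files read by readMapFromFile are plain java.util.Properties files mapping an item id to its
+  // position in the respective vector, e.g. an input mapping
+  //
+  //   motion_sensor_kitchen=0
+  //   brightness_sensor_kitchen=1
+  //
+  // InitModelsVectors("activity_input.properties", "activity_output.properties",
+  // LearnerHelper.ID_ACTIVITY_MODEL) would then load the input and output mappings and register
+  // them with the activity recognition Network.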
+
   @Override
   public void setKnowledgeBase(Root knowledgeBase) {
     this.knowledgeBase = knowledgeBase;
@@ -67,46 +104,64 @@ public class LearnerImpl implements Learner {
     return true;
   }
 
+  /** Loads a previously saved model for the given model id from the given folder path. */
+  public boolean loadModelFromFile(String path, int modelID, List<Integer> inputMaxes,
+                                   List<Integer> inputMins, List<Integer> targetMaxes, List<Integer> targetMins) {
+    logger.debug("Load model from file {}", path);
+    models.put(modelID, new Network(path, modelID, inputMaxes, inputMins, targetMaxes, targetMins));
+    return true;
+  }
+
   @Override
   public boolean loadModelFromFile(File file, int modelID, List<Integer> inputMaxes, List<Integer> inputMins, List<Integer> targetMaxes,
-                                   List<Integer> targetMins,List<String> listInputIndex, List<String> listOutputIndex) {
+                                   List<Integer> targetMins) {
     logger.debug("Load model from file {}", file);
-    models.put(modelID, new Network(file.getAbsolutePath(), modelID, inputMaxes, inputMins, targetMaxes, targetMins, listInputIndex, listOutputIndex));
+    models.put(modelID, new Network(file.getAbsolutePath(), modelID, inputMaxes, inputMins, targetMaxes, targetMins));
     inputVectors.put(modelID,new ArrayList<>());
     return true;
   }
 
   @Override
   public boolean loadModelFromFile(InputStream input, int modelID, List<Integer> inputMaxes, List<Integer> inputMins, List<Integer> targetMaxes,
-                                   List<Integer> targetMins,List<String> listInputIndex, List<String> listOutputIndex) {
+                                   List<Integer> targetMins) {
     logger.debug("Load model from input stream");
-    models.put(modelID, new Network(input, modelID, inputMaxes, inputMins, targetMaxes, targetMins, listInputIndex, listOutputIndex));
+    models.put(modelID, new Network(input, modelID, inputMaxes, inputMins, targetMaxes, targetMins));
     inputVectors.put(modelID,new ArrayList<>());
     return true;
   }
 
   @Override
-  public boolean train(int inputCount, int outputCount, int hiddenCount, int hiddenNeuronCount, int modelID,
-                       List<Integer> inputMaxes, List<Integer> inputMins, List<Integer> targetMaxes,
-                       List<Integer> targetMins,List<String> listInputIndex, List<String> listOutputIndex) {
+  public boolean train(int inputCount, int outputCount, int hiddenCount, int hiddenNeuronCount,
+                       int modelID, List<Integer> inputMaxes, List<Integer> inputMins, List<Integer> targetMaxes,
+                       List<Integer> targetMins) {
     // Method for the initial training of algorithms and models. That uses external data set for training.
-
+    // hyper-parameters for the initial training run
+    double learningrate = 0.07;
+    double momentum = 0.02;
     if (datasets.get(modelID) != null) {
       Dataset set = datasets.get(modelID);
 
       ReadCSV csv = set.getCsv();
 
       Network model = new Network(inputCount, outputCount, hiddenCount, hiddenNeuronCount, modelID, inputMaxes,
-          inputMins, targetMaxes, targetMins, listInputIndex,  listOutputIndex);
+              inputMins, targetMaxes, targetMins);
+
+      model.setLearningrate(learningrate);
+      model.setMomentum(momentum);
 
-      ArrayList<Double> input = new ArrayList<>();
-      ArrayList<Double> target = new ArrayList<>();
+      // double[][] input = new double[rowCount][inputMins.size()];
+      // double[][] target = new double[rowCount][targetMins.size()];
 
+      List<Double> input = new ArrayList<>();
+      List<Double> target = new ArrayList<>();
+
+      int countRows = 0;
+
+      logger.debug("read csv");
       while (csv.next()) {
-        logger.debug("Train next csv row");
+
+        // train one row after another
         for (int i = 0; i < csv.getColumnCount(); i++) {
-          int col_nr = i + 1;
-          if (set.getTargetColumns().contains(col_nr)) {
+          if (set.getTargetColumns().contains(i)) {
             target.add(csv.getDouble(i));
           } else {
             input.add(csv.getDouble(i));
@@ -116,11 +171,29 @@ public class LearnerImpl implements Learner {
         model.train(input, target);
         input.clear();
         target.clear();
+
+        // alternative: train with all rows at the same time via Network.train(double[][], double[][])
+        /*
+        int tar_nr = 0;
+        int in_nr = 0;
+
+        for (int i = 0; i < csv.getColumnCount(); i++) {
+          if (set.getTargetColumns().contains(i)) {
+            target[countRows][tar_nr] = csv.getDouble(i);
+            tar_nr++;
+          } else {
+            input[countRows][in_nr] = csv.getDouble(i);
+            in_nr++;
+          }
+        }
+
+        countRows++;
+        */
       }
 
+      // model.train(input, target);
+
       models.put(modelID, model);
-      inputVectors.put(modelID,new ArrayList<>());
-      model.saveModel(modelFolderPath);
+      model.saveModel(this.modelFolderPath);
 
       return true;
     }
@@ -130,10 +203,10 @@ public class LearnerImpl implements Learner {
   @Override
   public boolean train(double[][] data, int inputCount, int outputCount, int hiddenCount, int hiddenNeuronCount, int modelID,
                        List<Integer> inputMaxes, List<Integer> inputMins, List<Integer> targetMaxes,
-                       List<Integer> targetMins, List<Integer> targetColumns, List<String> listInputIndex, List<String> listOutputIndex) {
+                       List<Integer> targetMins, List<Integer> targetColumns) {
 
     Network model = new Network(inputCount, outputCount, hiddenCount, hiddenNeuronCount, modelID, inputMaxes,
-        inputMins, targetMaxes, targetMins, listInputIndex, listOutputIndex);
+            inputMins, targetMaxes, targetMins);
 
     return reTrainModel(model, data, targetColumns, modelID);
   }
@@ -163,7 +236,7 @@ public class LearnerImpl implements Learner {
 
         model.train(input, target);
         input.clear();
-        target.clear(); 
+        target.clear();
       }
 
     }
@@ -237,7 +310,7 @@ public class LearnerImpl implements Learner {
 
 
   private void setValueInputVectorByModelId(int modelId, int index, Double value){
-     this.inputVectors.get(modelId).set(index,value);
+    this.inputVectors.get(modelId).set(index,value);
   }
 
   private int  getInputIndex( String itemId, int modelID){
@@ -249,35 +322,35 @@ public class LearnerImpl implements Learner {
   }
 
   public double getActivity(){
-    double[] output_activity_recognition = this.models.get(0).computeResult(this.inputVectors.get(0));
+    double[] output_activity_recognition = this.models.get(LearnerHelper.ID_ACTIVITY_MODEL).computeResult(this.inputVectors.get(LearnerHelper.ID_ACTIVITY_MODEL));
     return output_activity_recognition[0];
   }
 
   public double[] getPreferencesForCurrentActivity(){
-    return this.models.get(1).computeResult(this.inputVectors.get(1));
+    return this.models.get(LearnerHelper.ID_PREFERENCE_MODEL).computeResult(this.inputVectors.get(LearnerHelper.ID_PREFERENCE_MODEL));
   }
 
   //prepare input vector for classify
   private void updateInputVector(Item item, int modelId){
-      int index = getInputIndex(item.getID(),modelId);//get position in the input vector
-      //update item in position "index" in input vector
-      if(item instanceof ColorItem){//color items have three values (HSB)
-        ColorItem coloritem = (ColorItem) item;
-        TupleHSB state = coloritem.get_state();
-        setValueInputVectorByModelId(modelId,index, Double.valueOf(state.getHue()) );
-        setValueInputVectorByModelId(modelId,index+1, Double.valueOf(state.getSaturation()));
-        setValueInputVectorByModelId(modelId,index+2, Double.valueOf(state.getBrightness()));
-      }else{
-        double value = item.getStateAsDouble();
-        setValueInputVectorByModelId(modelId,index, value );
-      }
+    int index = getInputIndex(item.getID(),modelId);//get position in the input vector
+    //update item in position "index" in input vector
+    if(item instanceof ColorItem){//color items have three values (HSB)
+      ColorItem coloritem = (ColorItem) item;
+      TupleHSB state = coloritem.get_state();
+      setValueInputVectorByModelId(modelId,index, Double.valueOf(state.getHue()) );
+      setValueInputVectorByModelId(modelId,index+1, Double.valueOf(state.getSaturation()));
+      setValueInputVectorByModelId(modelId,index+2, Double.valueOf(state.getBrightness()));
+    }else{
+      double value = item.getStateAsDouble();
+      setValueInputVectorByModelId(modelId,index, value );
+    }
   }
 
   public void updateLearner(List<Item> changedItems){
     int n_models = this.models.size();
     for(Item item:changedItems){
       for(int i = 0; i< n_models ;i++){
-        if(this.models.get(i).itemRelevant(item.getID())) {
+        if(this.models.get(i).isRelevantItem(item.getID())) {
           updateInputVector(item, i);
         }
       }
@@ -285,21 +358,28 @@ public class LearnerImpl implements Learner {
   }
 
   public List<String> getTargetItemsIdsPreferenceLearning() {
-    return this.models.get(1).getTargetItemsIds();
+    return this.models.get(LearnerHelper.ID_PREFERENCE_MODEL).getTargetItemsIds();
   }
 
 
   public List<String> getRelevantItemsIdsPrefenceLearning() {
-    return this.models.get(1).getRelevantItemsIds();
+    return this.models.get(LearnerHelper.ID_PREFERENCE_MODEL).getRelevantItemsIds();
   }
 
 
   public List<String> getTargetItemsIdsActivityLearning() {
-    return this.models.get(0).getTargetItemsIds();
+    return this.models.get(LearnerHelper.ID_ACTIVITY_MODEL).getTargetItemsIds();
   }
 
 
   public List<String> getRelevantItemsIdsActivityLearning() {
-    return this.models.get(0).getRelevantItemsIds();
+    return this.models.get(LearnerHelper.ID_ACTIVITY_MODEL).getRelevantItemsIds();
+  }
+
+  /** Applies the model with the given id to the given input vector. */
+  public double[] computeResult(int modelID, List<Double> inputVector) {
+    Network model = this.models.get(modelID);
+    return model.computeResult(inputVector);
   }
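+
+  // Usage sketch (assumption, not part of this change): a loaded model can also be applied
+  // directly, e.g.
+  //   double[] output = learner.computeResult(LearnerHelper.ID_PREFERENCE_MODEL, inputVector);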
 }
diff --git a/feedbackloop.learner/src/main/java/de/tudresden/inf/st/eraser/feedbackloop/learner/LearningHandler.java b/feedbackloop.learner/src/main/java/de/tudresden/inf/st/eraser/feedbackloop/learner/LearningHandler.java
index 0bd38c578073a11d01520cdcd82503f71a039ad4..b4cb29a7b7a0b65a8c190a41b4917d1b7c8dad13 100644
--- a/feedbackloop.learner/src/main/java/de/tudresden/inf/st/eraser/feedbackloop/learner/LearningHandler.java
+++ b/feedbackloop.learner/src/main/java/de/tudresden/inf/st/eraser/feedbackloop/learner/LearningHandler.java
@@ -7,36 +7,44 @@ import java.time.Instant;
 import java.util.List;
 
 /**
- * Adapter for internally held machine learning models.
+ * Adapter for accessing machine learning models that are held outside of the knowledge base.
+ * The class contains functions that are commonly used by ActivityLearningHandler and
+ * PreferenceLearningHandler. It accesses a Learner object to update the input vectors of the
+ * associated machine-learning models (function: newData) and the openHAB2model object to obtain
+ * references to the Item objects of the individual sensors/actuators (function: resolve).
  *
- * @author rschoene - Initial contribution
+ * @author MoralDav
+ * @author Bierzynski
  */
 public abstract class  LearningHandler implements MachineLearningEncoder, MachineLearningDecoder {
 
   private static final Logger logger = LogManager.getLogger(LearningHandler.class);
   private LearnerImpl Learner;
-  private InternalMachineLearningModel model;
+  //private InternalMachineLearningModel model;
   private OpenHAB2Model openHAB2model;
 
+
+  public LearningHandler(LearnerImpl learner, OpenHAB2Model openHAB2model){
+    this.Learner = learner;
+    this.openHAB2model = openHAB2model;
+  }
   public LearnerImpl getLearner() {
     return this.Learner;
   }
-
-  public LearningHandler setLearner(LearnerImpl learner) {
-    this.Learner=learner;
-    return this;
-  }
-
+  public OpenHAB2Model getOpenHAB2model(){return this.openHAB2model;}
+
+  /**
+   * Triggers the Learner to update the input vectors of all Networks (i.e., machine-learning
+   * models) that it manages with the item values that have changed.
+   *
+   * @param changedItems a list of items whose state has changed
+   */
   @Override
   public void newData(List<Item> changedItems) {
     //prepare input vector for each model and each item
     this.getLearner().updateLearner(changedItems);
   }
 
-  public LearningHandler setModel(InternalMachineLearningModel model) {
-    this.model = model;
-    return this;
-  }
 
   public static Logger getLogger() {
     return logger;
@@ -47,6 +55,13 @@ public abstract class  LearningHandler implements MachineLearningEncoder, Machin
     // ignored
   }
 
+  /**
+   * Resolves the Item object that is associated with the given item id in the knowledge base.
+   *
+   * @param itemId the id of the item to resolve
+   * @return the resolved Item
+   */
   public Item resolve(String itemId) {
 
     java.util.Optional<Item> maybeItem = this.openHAB2model.resolveItem(itemId);
@@ -74,9 +89,9 @@ public abstract class  LearningHandler implements MachineLearningEncoder, Machin
    @Override
   public abstract MachineLearningResult classify();
 
-  public ItemPreference getPreferenceItem(Item item, double[] output_preferenceLearning){
+  public ItemPreference wrapIntoPreferenceItem(Item item, double[] output_preferenceLearning, int modelId){
     ItemPreference preference;
-    int index = this.Learner.getOutputIndex(item.getID(),1);
+    int index = this.Learner.getOutputIndex(item.getID(),modelId);
     if(item.getClass().getName()=="ColorItem"){
       preference = new ItemPreferenceColor(item, TupleHSB.of( (int)Math.round(output_preferenceLearning[index]),  (int)Math.round(output_preferenceLearning[index+1]), (int)Math.round(output_preferenceLearning[index+2])));
     }else{
diff --git a/feedbackloop.learner/src/main/java/de/tudresden/inf/st/eraser/feedbackloop/learner/Network.java b/feedbackloop.learner/src/main/java/de/tudresden/inf/st/eraser/feedbackloop/learner/Network.java
index 12c23b292e0507ece243c86da96e56c24ae34c85..e97e7f40379b463ac0ce8877ef88d6a7954f7ba5 100644
--- a/feedbackloop.learner/src/main/java/de/tudresden/inf/st/eraser/feedbackloop/learner/Network.java
+++ b/feedbackloop.learner/src/main/java/de/tudresden/inf/st/eraser/feedbackloop/learner/Network.java
@@ -2,9 +2,9 @@ package de.tudresden.inf.st.eraser.feedbackloop.learner;
 
 import java.io.File;
 import java.io.FileInputStream;
+import java.io.IOException;
 import java.io.InputStream;
-import java.util.ArrayList;
-import java.util.List;
+import java.util.*;
 
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
@@ -18,14 +18,13 @@ import org.encog.neural.networks.training.propagation.back.Backpropagation;
 import org.encog.persist.EncogDirectoryPersistence;
 import org.encog.util.arrayutil.NormalizationAction;
 import org.encog.util.arrayutil.NormalizedField;
-import java.util.HashMap;
-import java.util.Map;
 import org.encog.util.simple.EncogUtility;
 
 /**
  * Network class serves as interface to encog BasicNetwork and holdsfunctions for handling the BasicNetwork (training, input, output and inference)
  *
  * @author Bierzynski - initial contribution
+ * @author MoralDav
  */
 public class Network {
   private static final Logger logger = LogManager.getLogger(Network.class);
@@ -33,8 +32,17 @@ public class Network {
   private int modelID;
   private ArrayList<NormalizedField> normalizersIn;
   private ArrayList<NormalizedField> normalizersTar;
+
+  // maps item-ids to the index positions at which the values of those items
+  // are expected to be placed in the input vector of this model (see LearnerImpl.inputVectors)
+  // TODO: use LearnerImpl.InitModelsVectors to update indexInputvector/indexOutputvector
   private Map<String, Integer> indexInputvector = new HashMap<>();
+
+  // maps item-ids to the index positions at which the values of those items will be placed within
+  // the output of this model (see e.g.: PreferenceLearningHandler.classify, Network.computeResult)
   private Map<String, Integer> indexOutputvector = new HashMap<>();
+
+  // back-propagation hyper-parameters (defaults, adjustable via the setters below)
+  private Double learningrate = 3.5;
+  private Double momentum = 0.3;
 
   /**
    * Constructor for when the neural network is created from data.
@@ -48,12 +56,14 @@ public class Network {
    * @param inputMins         list that contains min values of all input columns (sensors) e.g. light intensity 0
    * @param targetMaxes       list that contains max values of all output columns (results) e.g. brightness 100 for preference learning
    * @param targetMins        list that contains min values of all output columns (results) e.g. brightness 0 for preference learning
-   * @param listInputIndex        list that containg the item's Ids to initialize the map indexInputvector
-   * @param  listOutputIndex       list that containg the item's Ids to initialize the map indexOutpurvector
    */
   public Network(int inputCount, int outputCount, int hiddenCount, int hiddenNeuronCount, int modelID,
                  List<Integer> inputMaxes, List<Integer> inputMins, List<Integer> targetMaxes,
-                 List<Integer> targetMins,List<String> listInputIndex,List<String> listOutputIndex) {
+                 List<Integer> targetMins) {
+
+    learningrate = 3.5;
+    momentum = 0.3;
 
     normalizersIn = new ArrayList<>();
     normalizersTar = new ArrayList<>();
@@ -73,17 +83,15 @@ public class Network {
 
     addNormalizer(inputMaxes, inputMins, normalizersIn);
     addNormalizer(targetMaxes, targetMins, normalizersTar);
-    initializeMaps(listInputIndex,listOutputIndex);
+
   }
 
-  private void initializeMaps(List<String> listInputIndex, List<String> listOutputIndex){
-    for(int i = 0; i < listInputIndex.size();i++){
-      this.indexInputvector.put(listInputIndex.get(i),i);
-    }
-    for(int i = 0; i < listOutputIndex.size();i++){
-      this.indexOutputvector.put(listOutputIndex.get(i),i);
-    }
+  public void setIndexInputvector(Map<String, Integer> indexInputvector) {
+    this.indexInputvector = indexInputvector;
+  }
 
+  public void setIndexOutputvector(Map<String, Integer> indexOutputvector) {
+    this.indexOutputvector = indexOutputvector;
   }
 
   public List<String> getTargetItemsIds(){
@@ -121,12 +129,22 @@ public class Network {
    * @param inputMins   list that contains min values of all input columns (sensors) e.g. light intensity 0
    * @param targetMaxes list that contains max values of all output columns (results) e.g. brightness 100 for preference learning
    * @param targetMins  list that contains min values of all output columns (results) e.g. brightness 0 for preference learning
-   * @param listInputIndex        list that containg the item's Ids to initialize the map indexInputvector
-   * @param  listOutputIndex       list that containg the item's Ids to initialize the map indexOutpurvector
    */
   public Network(String path, int modelID, List<Integer> inputMaxes, List<Integer> inputMins, List<Integer> targetMaxes,
-                 List<Integer> targetMins, List<String> listInputIndex,List<String> listOutputIndex) {
-    this(() -> (BasicNetwork) EncogDirectoryPersistence.loadObject(new File(path, "NN_" + modelID)), modelID, inputMaxes, inputMins, targetMaxes, targetMins,listInputIndex,listOutputIndex);
+                 List<Integer> targetMins) {
+    learningrate = 3.5;
+    momentum = 0.3;
+
+    this.modelID = modelID;
+
+    normalizersIn = new ArrayList<>();
+    normalizersTar = new ArrayList<>();
+
+    network = (BasicNetwork) EncogDirectoryPersistence.loadObject(new File(path, "NN_" + modelID));
+
+    addNormalizer(inputMaxes, inputMins, normalizersIn);
+    addNormalizer(targetMaxes, targetMins, normalizersTar);
+
   }
 
   /**
@@ -139,16 +157,16 @@ public class Network {
    * @param inputMins   list that contains min values of all input columns (sensors) e.g. light intensity 0
    * @param targetMaxes list that contains max values of all output columns (results) e.g. brightness 100 for preference learning
    * @param targetMins  list that contains min values of all output columns (results) e.g. brightness 0 for preference learning
-   * @param listInputIndex        list that containg the item's Ids to initialize the map indexInputvector
-   * @param  listOutputIndex       list that containg the item's Ids to initialize the map indexOutpurvector
    */
   public Network(InputStream input, int modelID, List<Integer> inputMaxes, List<Integer> inputMins, List<Integer> targetMaxes,
-                 List<Integer> targetMins, List<String> listInputIndex,List<String> listOutputIndex) {
-    this(() -> (BasicNetwork) EncogDirectoryPersistence.loadObject(input), modelID, inputMaxes, inputMins, targetMaxes, targetMins, listInputIndex,listOutputIndex);
+                 List<Integer> targetMins) {
+    this(() -> (BasicNetwork) EncogDirectoryPersistence.loadObject(input), modelID, inputMaxes, inputMins, targetMaxes, targetMins);
   }
 
   private Network(LoadEncogModel loader, int modelID, List<Integer> inputMaxes, List<Integer> inputMins, List<Integer> targetMaxes,
-                 List<Integer> targetMins, List<String> listInputIndex,List<String> listOutputIndex) {
+                  List<Integer> targetMins) {
+    learningrate = 3.5;
+    momentum = 0.3;
     this.modelID = modelID;
 
     normalizersIn = new ArrayList<>();
@@ -158,7 +176,7 @@ public class Network {
 
     addNormalizer(inputMaxes, inputMins, normalizersIn);
     addNormalizer(targetMaxes, targetMins, normalizersTar);
-    initializeMaps(listInputIndex,listOutputIndex);
+
   }
 
   @FunctionalInterface
@@ -204,7 +222,36 @@ public class Network {
 
     train.finishTraining();
   }
+
+  /**
+   * Trains the {@link BasicNetwork} with all rows at once (batch training) instead of row by row.
+   *
+   * @param input  two-dimensional array with one input row per training sample
+   * @param target two-dimensional array with the corresponding target rows
+   */
+  public void train(double[][] input, double[][] target) {
+    double[][] normalizedInput = new double[input.length][input[0].length];
+    double[][] normalizedTarget = new double[target.length][target[0].length];
+
+    for (int i = 0; i < input.length; i++) {
+      for (int j = 0; j < input[0].length; j++) {
+        normalizedInput[i][j] = normalizersIn.get(j).normalize(input[i][j]);
+      }
+    }
+
+    for (int a = 0; a < target.length; a++) {
+      for (int b = 0; b < target[0].length; b++) {
+        normalizedTarget[a][b] = normalizersTar.get(b).normalize(target[a][b]);
+      }
+    }
+
+    MLDataSet trainingSet = new BasicMLDataSet(normalizedInput, normalizedTarget);
+    MLTrain train = new Backpropagation(network, trainingSet, learningrate, momentum);
+
+    do {
+      train.iteration();
+    } while (train.getError() > 0.005);
+
+    train.finishTraining();
+  }
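+
+  // Usage sketch for this batch variant (caller-side array shapes are assumptions, not part of
+  // this change):
+  //   double[][] inputs  = ...; // one row per training sample, columns in input-vector order
+  //   double[][] targets = ...; // one row per training sample, columns in target-vector order
+  //   network.train(inputs, targets);
+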
   /**
    * Method that uses the {@link BasicNetwork} to predict/classify/.. something based on an input.
    *
@@ -239,7 +286,15 @@ public class Network {
     return normalizersTar;
   }
 
-  public boolean itemRelevant(String itemId){
+  public boolean isRelevantItem(String itemId){
     return this.indexInputvector.containsKey(itemId);
   }
+
+  public void setMomentum(Double momentum) {
+    this.momentum = momentum;
+  }
+
+  public void setLearningrate(Double learningrate) {
+    this.learningrate = learningrate;
+  }
 }
diff --git a/feedbackloop.learner/src/main/java/de/tudresden/inf/st/eraser/feedbackloop/learner/PreferenceLearningHandler.java b/feedbackloop.learner/src/main/java/de/tudresden/inf/st/eraser/feedbackloop/learner/PreferenceLearningHandler.java
index 682bb1363bfb3f252ba4f1dfaa691431edb82829..27602181eb4e6a28767417af2b618c5f481c3f44 100644
--- a/feedbackloop.learner/src/main/java/de/tudresden/inf/st/eraser/feedbackloop/learner/PreferenceLearningHandler.java
+++ b/feedbackloop.learner/src/main/java/de/tudresden/inf/st/eraser/feedbackloop/learner/PreferenceLearningHandler.java
@@ -7,18 +7,18 @@ import java.util.List;
 /**
- * Adapter for internally held machine learning models.
+ * Adapter for accessing machine learning models that are used for preference learning.
  *
- * @author rschoene - Initial contribution
+ * @author MoralDav
+ * @author Bierzynski
  */
 public class PreferenceLearningHandler extends ActivityLearningHandler implements MachineLearningEncoder, MachineLearningDecoder {
 
-  public PreferenceLearningHandler setLearner(LearnerImpl learner) {
-    return (PreferenceLearningHandler) super.setLearner(learner);
-  }
 
-  public PreferenceLearningHandler setModel(InternalMachineLearningModel model) {
-    return (PreferenceLearningHandler) super.setModel(model);
+  public PreferenceLearningHandler(LearnerImpl learner, OpenHAB2Model openHAB2model){
+    super(learner, openHAB2model);
   }
 
+
   @Override
   public List<Item> getTargets() {
     List<Item> targets = new ArrayList<Item>();
@@ -29,6 +29,15 @@ public class PreferenceLearningHandler extends ActivityLearningHandler implement
     return targets ;
   }
 
+  /**
+   * Accesses the meta-information (text file: at which position within the input vector is the
+   * value for a specific item id expected?) of the preference learning Network object (managed
+   * by the Learner object) to determine the items whose current values the preference learning
+   * machine-learning model needs for its decision making.
+   *
+   * @return the items whose state is relevant for preference learning
+   */
   @Override
   public List<Item> getRelevantItems() {
     List<Item> relevantItems = new ArrayList<Item>();
@@ -44,12 +53,22 @@ public class PreferenceLearningHandler extends ActivityLearningHandler implement
     getLogger().debug("Ignored training trigger.");
   }
 
-  //classify using the input vector given by newData
+  /**
+   * Triggers the Learner object to estimate the currently preferred lighting configuration from
+   * the sensor data held in the preference learning input vector (kept up to date by
+   * LearningHandler.newData). Before the estimation, the currently performed activity is
+   * determined first and written into the preference learning input vector
+   * (this.getLearner().updateLearner(changedItems)).
+   *
+   * @return the preferred configuration of the target items, wrapped as a machine learning result
+   */
    @Override
   public MachineLearningResult classify() {
     //Using activity recognition to get current activity
      MachineLearningResult resultActivityRecognition = super.classify();
-     ActivityItem current_activity = (ActivityItem) resultActivityRecognition.getPreferences();
+     List<ItemPreference> preferencesList = resultActivityRecognition.getPreferences();
+     Item item = preferencesList.get(LearnerHelper.IDX_ACTIVITY_ITEM_TARGET).getItem();
+     ActivityItem current_activity = (ActivityItem) item;
 
      //update Learner's state
      List<Item> changedItems = new ArrayList<>();
@@ -64,7 +83,7 @@ public class PreferenceLearningHandler extends ActivityLearningHandler implement
     List<ItemPreference> preferences = new ArrayList<ItemPreference>();
     List<Item> targets = getTargets();//get items that this model is supposed to change
     for (Item i:targets){
-      ItemPreference preference = getPreferenceItem(i,output_preferenceLearning);
+      ItemPreference preference = wrapIntoPreferenceItem(i,output_preferenceLearning, LearnerHelper.ID_PREFERENCE_MODEL);
       preferences.add(preference);//add preference to the preferences array
     }