Commit 6c394865 authored by René Schöne

Remove unused modules.

parent 7cb455b6
1 merge request: !19 dev to master
Showing with 0 additions and 1034 deletions
/**
* Copyright (c) 2010-2016 by the respective copyright holders.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*/
package org.openhab.action.machinelearn.internal;
import org.osgi.framework.BundleActivator;
import org.osgi.framework.BundleContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Extension of the default OSGi bundle activator
*
* @author Kai Kreuzer
* @since 1.3.0
*/
public final class MachineLearnActivator implements BundleActivator {
private static Logger logger = LoggerFactory.getLogger(MachineLearnActivator.class);
private static BundleContext context;
/**
* Called whenever the OSGi framework starts our bundle
*/
@Override
public void start(BundleContext bc) throws Exception {
context = bc;
logger.debug("Machine Learning action has been started.");
}
/**
* Called whenever the OSGi framework stops our bundle
*/
@Override
public void stop(BundleContext bc) throws Exception {
context = null;
logger.debug("Machine Learning action has been stopped.");
}
/**
* Returns the bundle context of this bundle
*
* @return the bundle context
*/
public static BundleContext getContext() {
return context;
}
}
Bundle resources go in here!
\ No newline at end of file
build/
/bin/
logs/
Manifest-Version: 1.0
Private-Package: org.openhab.action.helloworld.internal
Ignore-Package: org.openhab.action.helloworld.internal
Bundle-License: http://www.eclipse.org/legal/epl-v10.html
Bundle-Name: openLicht Reinforcement Learning Action
Bundle-SymbolicName: org.openlicht.action.reinforcementlearning
Bundle-Version: 1.9.0.qualifier
Bundle-Activator: org.openlicht.action.reinforcementlearning.internal.MainActivator
Bundle-ManifestVersion: 2
Bundle-Description: This is the Hello World action of the open Home Aut
omation Bus (openHAB)
Import-Package: org.apache.commons.lang,
org.eclipse.jdt.annotation,
org.openhab.core.library.types,
org.openhab.core.scriptengine.action,
org.osgi.framework,
org.osgi.service.cm,
org.osgi.service.component,
org.slf4j
Bundle-DocURL: http://www.openhab.org
Bundle-RequiredExecutionEnvironment: JavaSE-1.8
Service-Component: OSGI-INF/action.xml
Bundle-ClassPath: .,
lib/encog-core-3.4.jar
Bundle-ActivationPolicy: lazy
Automatic-Module-Name: org.openlicht.action.reinforcementlearning
Require-Bundle: org.eclipse.smarthome.core
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright (c) 2010-2016 by the respective copyright holders.
All rights reserved. This program and the accompanying materials
are made available under the terms of the Eclipse Public License v1.0
which accompanies this distribution, and is available at
http://www.eclipse.org/legal/epl-v10.html
-->
<scr:component xmlns:scr="http://www.osgi.org/xmlns/scr/v1.1.0" activate="activate" deactivate="deactivate" immediate="true" name="org.openlicht.action.reinforcementlearning.action">
<implementation class="org.openlicht.action.reinforcementlearning.internal.MainActionService" />
<service>
<provide interface="org.openhab.core.scriptengine.action.ActionService" />
<provide interface="org.osgi.service.cm.ManagedService" />
</service>
<property name="service.pid" type="String" value="org.openhab.helloworld" />
</scr:component>
dependencies {
compile files('lib/encog-core-3.4.jar')
compile project(':stub.org.openhab.core.scriptengine.action')
compile group: 'org.eclipse.jdt', name: 'org.eclipse.jdt.annotation', version: '2.2.200'
compile group: 'org.osgi', name: 'org.osgi.framework', version: '1.9.0'
compile group: 'org.osgi', name: 'org.osgi.service.cm', version: '1.6.0'
compile group: 'org.osgi', name: 'org.osgi.service.component', version: '1.4.0'
compile group: 'org.slf4j', name: 'slf4j-api', version: '1.7.25'
}
sourceSets {
main {
java {
srcDir 'src/main/java'
}
}
}
File deleted
/**
* Copyright (c) 2010-2016 by the respective copyright holders.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*/
package org.openlicht.action.reinforcementlearning.internal;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.util.HashMap;
import org.openhab.core.scriptengine.action.ActionDoc;
import org.openhab.core.scriptengine.action.ParamDoc;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class provides static methods that can be used in automation rules
* for using reinforcement learning methods and connecting to openHAB.
*
* @author David Morales Rodríguez
*
*
*/
public class Main {
private static int N_LAMPS = 3;
private static int N_VALUES = 3;
private static ReinforcementLearningAlgorithm rl_algorithm = null;
private static Logger logger = LoggerFactory.getLogger(Main.class);
// Method to convert a string to a double. Used to read naturalLight (Home I/O uses "," as the decimal separator)
private static double changeStringToDouble(String string_number) {
double number;
int endIndex = string_number.indexOf(",");
if (endIndex > 0) {
number = Double.parseDouble(string_number.substring(0, endIndex));
if (endIndex < string_number.length() - 1) {
String decimal_string = string_number.substring(endIndex + 1, string_number.length());
double decimal = Double.parseDouble(decimal_string);
decimal = decimal / (Math.pow(10, decimal_string.length()));
number = number + decimal;
}
} else {
number = Double.parseDouble(string_number);
}
return number;
}
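// Illustrative values (not from the original source):
//   changeStringToDouble("7,25") -> 7.25  (Home I/O comma notation)
//   changeStringToDouble("7.25") -> 7.25  (no comma: plain Double.parseDouble)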
// Method to initialize the model
@ActionDoc(text = "Test Method for building the model")
public static synchronized void initModel() throws IOException {
logger.warn("Initializing model.");
if (rl_algorithm == null) {
trainAndSave();
} else {
logger.warn("model was already initialized.");
}
}
// Method to initialize the model by loading it from the given network files
@ActionDoc(text = "Test Method for building the model")
public static synchronized void initModel(@ParamDoc(name = "f_nnRgb") String f_nnRgb,
@ParamDoc(name = "f_nnBright") String f_nnBright) throws IOException {
logger.warn("Initializing model.");
if (rl_algorithm == null) {
rl_algorithm = new ReinforcementLearningAlgorithm(N_LAMPS, N_VALUES, f_nnRgb, f_nnBright);
} else {
logger.warn("model was already initialized.");
}
}
// Method to save the model in the following files
// file for nn rgb: "\\openhab-2.3.0\\datasets\\RGB_NN"
// file for nn bright: "\\openhab-2.3.0\\datasets\\Bright_NN"
@ActionDoc(text = "Test Method for saving the model")
public static synchronized void saveModel() throws IOException {
logger.warn("saving model.");
rl_algorithm.saveModel();
logger.warn("model was saved");
}
// Method to get the plan for the current state (activity, natural light)
@ActionDoc(text = "Test Method for getting the plan", returns = "plan")
public static synchronized HashMap<String, Double> planBright(@ParamDoc(name = "activity") int activity,
@ParamDoc(name = "natural_light") String natural_light) throws IOException {
if (rl_algorithm == null) {
initModel();
}
// natural light from 0 to 10 in HOME IO, from 0 to 100 for us
double light = changeStringToDouble(natural_light) * 10;
State state = new State(activity, light);
logger.warn("getting plan for activity: " + Integer.toString(activity) + " natural light: "
+ Double.toString(light));
// get plan using RL
double aux_plan = rl_algorithm.planBright(state);
// construct hashMap to return
HashMap<String, Double> plan = new HashMap<String, Double>();
plan.put("lamp" + Integer.toString(activity), aux_plan);
return plan;
}
// training method, input: activity, natural light, ideal_light
@ActionDoc(text = "Test Method for training")
public static synchronized void trainBright(@ParamDoc(name = "activity") int activity,
@ParamDoc(name = "natural_light") String natural_light, @ParamDoc(name = "ideal_light") double ideal_light)
throws IOException {
if (rl_algorithm == null) {
initModel();
}
logger.warn("training algorithm");
double ideal = ideal_light;
// natural light from 0 to 10 in HOME IO, from 0 to 100 for us
State state = new State(activity, changeStringToDouble(natural_light) * 10);
rl_algorithm.trainAlgorithmBright(state, ideal);
}
// method for training the model using datasets and then save the model
private static void trainAndSave() {
rl_algorithm = new ReinforcementLearningAlgorithm(N_LAMPS, N_VALUES);
String dataset = "\\openhab-2.3.0\\datasets\\dataset.txt";
String output = "\\openhab-2.3.0\\datasets\\output.txt";
String line;
// read dataset to train our model
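// A data line is assumed to look like (format reconstructed from the parsing below):
//   <activity> <natural_light> [<v1>, <v2>, <v3>] <ideal_bright>
// e.g. "1 42.0 [255, 200, 180] 60.0"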
try {
FileReader f = new FileReader(dataset);
BufferedReader b = new BufferedReader(f);
logger.warn("reading dataset.");
FileWriter f_output = new FileWriter(output);
BufferedWriter bw = new BufferedWriter(f_output);
// we don't need the first line (header)
line = b.readLine();
// read data
double lamps_values[] = new double[N_VALUES];
// double[] planRGB;
double planBright;
int contador = 0;
while ((line = b.readLine()) != null) {
++contador;
int index = line.indexOf(" ");
int activity = Integer.parseInt((line.substring(0, index)));
index = index + 1;
int n_index = line.indexOf(" ", index);
double natural_light = Double.parseDouble(line.substring(index, n_index));
double ideal_bright;
index = n_index;
State state = new State(activity, natural_light);
index = index + 2;
int last_index = index;
for (int j = 0; j < N_VALUES; j++) {
if (j == 2) {
index = line.indexOf("]", index);
} else {
index = line.indexOf(",", index);
}
lamps_values[j] = Integer.parseInt((line.substring(last_index, index)));
index = index + 2;
last_index = index;
}
index = line.length();
ideal_bright = Double.parseDouble(line.substring(last_index, index));
// before training
logger.warn("Before Training");
// planRGB = rl_algorithm.planRGB(state);
planBright = rl_algorithm.planBright(state);
logger.warn("natural light " + natural_light + " activity: " + activity + " light plan: "
+ Double.toString(planBright));
logger.warn("user values: " + Double.toString(ideal_bright));
// training the algorithm
// rl_algorithm.trainAlgorithmRGB(state, lamps_values);
rl_algorithm.trainAlgorithmBright(state, ideal_bright);
logger.warn("After Training");
planBright = rl_algorithm.planBright(state);
logger.warn("natural light " + natural_light + " activity: " + activity + " light plan: "
+ Double.toString(planBright));
logger.warn("user values: " + Double.toString(ideal_bright));
}
bw.close();
f_output.close();
b.close();
f.close();
} catch (Exception e) {
logger.error("Exception occurred during execution: {}", e.getMessage(), e);
}
rl_algorithm.saveModel();
}
/*
* @ActionDoc(text = "Test Method for training", returns = "algorithm trained")
* public static synchronized void trainRGB(@ParamDoc(name = "activity") int activity,
*
* @ParamDoc(name = "bright") String bright, @ParamDoc(name = "configuration") HSBType configuration)
* throws IOException {
* double[] users_lamps_values = new double[3];
*
* Color color = new Color(configuration.getRGB());
* users_lamps_values[0] = color.getRed();
* users_lamps_values[1] = color.getGreen();
* users_lamps_values[2] = color.getBlue();
*
* logger.warn("lamps values given by user: {} {} {}", Double.toString(users_lamps_values[0]),
* Double.toString(users_lamps_values[1]), Double.toString(users_lamps_values[2]));
* if (rl_algorithm == null) {
* initModel();
* }
* logger.warn("training algorithm");
*
* State state = new State(activity, Double.parseDouble(bright)*10);
* rl_algorithm.trainAlgorithmRGB(state, users_lamps_values);
*
* }
*
*
*
* @ActionDoc(text = "Test Method for getting the plan", returns = "plan")
* public static synchronized HashMap<String, HSBType> planRGB() throws IOException {
*
* if (rl_algorithm == null) {
* initModel();
* }
* logger.warn("getting plan");
* HashMap<String, HSBType> plan = new HashMap<String, HSBType>();
*
* double[] lamps_values = rl_algorithm.planRGB(state);
*
* for (int i = 0; i < N_LAMPS; i++) {
* int red = (int) Math.round(lamps_values[i][0]);
* int green = (int) Math.round(lamps_values[i][1]);
* int blue = (int) Math.round(lamps_values[i][2]);
* logger.warn("lamps values given by plan function: {} {} {}", red, green, blue);
* HSBType configuration = HSBType.fromRGB(red, green, blue);
* configuration.toRGB();
* plan.put("lamp" + Integer.toString(i), configuration);
*
* }
* return plan;
*
* }
*/
}
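For orientation, here is a minimal sketch of driving the static action methods above from plain Java; the driver class and its sample values are hypothetical, and initModel() will try to read the dataset under \openhab-2.3.0\datasets\ on first use:

import java.io.IOException;
import java.util.HashMap;

// Hypothetical driver for the Main action; not part of this commit.
public class MainUsageSketch {
    public static void main(String[] args) throws IOException {
        // One training observation: activity 1, natural light "4,2" in Home I/O
        // comma notation (scaled x10 to 42 internally), ideal brightness 60 %.
        Main.trainBright(1, "4,2", 60.0);
        // Request a brightness plan for the same state.
        HashMap<String, Double> plan = Main.planBright(1, "4,2");
        System.out.println(plan); // single entry keyed "lamp1"
        // Persist the trained networks to the hard-coded dataset paths.
        Main.saveModel();
    }
}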
/**
* Copyright (c) 2010-2016 by the respective copyright holders.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*/
package org.openlicht.action.reinforcementlearning.internal;
import java.util.Dictionary;
import org.openhab.core.scriptengine.action.ActionService;
import org.osgi.service.cm.ConfigurationException;
import org.osgi.service.cm.ManagedService;
/**
* This class registers an OSGi service for the Main action.
*
* @author David Morales Rodríguez
*/
public class MainActionService implements ActionService, ManagedService {
/**
* Indicates whether this action is properly configured which means all
* necessary configurations are set. This flag can be checked by the
* action methods before executing code.
*/
/* default */ static boolean isProperlyConfigured = false;
public MainActionService() {
}
public void activate() {
}
public void deactivate() {
// deallocate Resources here that are no longer needed and
// should be reset when activating this binding again
}
@Override
public String getActionClassName() {
return Main.class.getCanonicalName();
}
@Override
public Class<?> getActionClass() {
return Main.class;
}
@Override
@SuppressWarnings("rawtypes")
public void updated(Dictionary config) throws ConfigurationException {
}
}
/**
* Copyright (c) 2010-2016 by the respective copyright holders.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*/
package org.openlicht.action.reinforcementlearning.internal;
import org.osgi.framework.BundleActivator;
import org.osgi.framework.BundleContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Extension of the default OSGi bundle activator
*
* @author David Morales Rodríguez
*/
public final class MainActivator implements BundleActivator {
private static Logger logger = LoggerFactory.getLogger(MainActivator.class);
private static BundleContext context;
/**
* Called whenever the OSGi framework starts our bundle
*/
@Override
public void start(BundleContext bc) throws Exception {
context = bc;
logger.debug("Main action has been started.");
}
/**
* Called whenever the OSGi framework stops our bundle
*/
@Override
public void stop(BundleContext bc) throws Exception {
context = null;
logger.debug("Main action has been stopped.");
}
/**
* Returns the bundle context of this bundle
*
* @return the bundle context
*/
public static BundleContext getContext() {
return context;
}
}
package org.openlicht.action.reinforcementlearning.internal;
import java.util.Random;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
//abstract class Model
public abstract class Model {
protected int N_VALUES;
protected int N_LAMPS;
protected Random rnd = new Random();
protected static Logger logger = LoggerFactory.getLogger(Model.class);
public Model(int N_LAMPS, int N_VALUES) {
this.N_LAMPS = N_LAMPS;
this.N_VALUES = N_VALUES;
rnd.setSeed(System.currentTimeMillis());
}
// method to validate lamps values (RGB)
protected double[] validateLampsValues(double[] l_values) {
for (int i = 0; i < this.N_VALUES; ++i) {
if (l_values[i] > 255) {
l_values[i] = 255;
}
if (l_values[i] < 0) {
l_values[i] = 0;
}
}
return l_values;
}
// method to validate bright value
protected double validateBrightValue(double bright_value) {
if (bright_value > 100) {
bright_value = 100;
}
if (bright_value < 0) {
bright_value = 0;
}
return bright_value;
}
// training the model for the current state using the brightness value given by the user
public abstract void trainBright(State state, double bright);
// training the model for the current state using the RGB values given by the user
public abstract void trainRGB(State state, double[] user_lamps_values);
// get output (lamps_values (RGB)) using exploitation
public abstract double[] getPlanRGB(State state);
// get output (lamps_values (brightness)) using exploitation
public abstract double getPlanBright(State state);
// method to save the model
public abstract void saveModel();
}
package org.openlicht.action.reinforcementlearning.internal;
import java.io.File;
import org.encog.engine.network.activation.ActivationSigmoid;
import org.encog.ml.data.MLDataSet;
import org.encog.ml.data.basic.BasicMLDataSet;
import org.encog.ml.train.MLTrain;
import org.encog.neural.networks.BasicNetwork;
import org.encog.neural.networks.layers.BasicLayer;
import org.encog.neural.networks.training.propagation.back.Backpropagation;
import org.encog.persist.EncogDirectoryPersistence;
// This class implements an RL model using two neural networks built with Encog
public class MultilayerPerceptron_Model extends Model {
// first NN to get rgb configuration
private BasicNetwork RGB_network;
// second NN to get bright configuration
private BasicNetwork Bright_network;
// constructor
public MultilayerPerceptron_Model(int N_LAMPS, int N_VALUES) {
super(N_LAMPS, N_VALUES);
// NN to predict RGB values
this.RGB_network = new BasicNetwork();
// no activation function,true->bias neurons, input layer has 2 neurons (activity, natural_light)
this.RGB_network.addLayer(new BasicLayer(null, true, 2));
// Sigmoid activation function,true->bias neurons, hidden layer has 6 neurons
this.RGB_network.addLayer(new BasicLayer(new ActivationSigmoid(), true, 6));
// Sigmoid activation function,false->bias neurons, output layer has 3 neurons (RGB values)
this.RGB_network.addLayer(new BasicLayer(new ActivationSigmoid(), false, 3));
this.RGB_network.getStructure().finalizeStructure();
this.RGB_network.reset();
// NN to predict bright
this.Bright_network = new BasicNetwork();
// no activation function,true->bias neurons, input layer has 2 neurons (activity, natural_light)
this.Bright_network.addLayer(new BasicLayer(null, true, 2));
// Sigmoid activation function,true->bias neurons, hidden layer has 10 neurons
this.Bright_network.addLayer(new BasicLayer(new ActivationSigmoid(), true, 10));
// Sigmoid activation function, false->bias neurons, output layer has 1 neuron (brightness value)
this.Bright_network.addLayer(new BasicLayer(new ActivationSigmoid(), false, 1));
this.Bright_network.getStructure().finalizeStructure();
this.Bright_network.reset();
}
// constructor with files to initialize NNs
public MultilayerPerceptron_Model(int N_LAMPS, int N_VALUES, String file_NNrgb, String file_NNbright) {
super(N_LAMPS, N_VALUES);
logger.warn("trying to load nn");
this.RGB_network = (BasicNetwork) EncogDirectoryPersistence.loadObject(new File(file_NNrgb));
this.Bright_network = (BasicNetwork) EncogDirectoryPersistence.loadObject(new File(file_NNbright));
logger.warn("nn initialized");
}
// method to save the model
@Override
public void saveModel() {
// save model
EncogDirectoryPersistence.saveObject(new File("\\openhab-2.3.0\\datasets\\RGB_NN"), this.RGB_network);
EncogDirectoryPersistence.saveObject(new File("\\openhab-2.3.0\\datasets\\Bright_NN"), this.Bright_network);
}
/////////////////////////////////////// aux functions ///////////////////////////////////////////////////////////
// function to normalize values: inputs are scaled for the sigmoid activation
private double normalizeActivityPlanRGB(int activity) {
return (activity - 2) * 1;
}
// function to normalize values: inputs are scaled for the sigmoid activation
private double normalizeNatural_lightPlanRGB(double natural_light) {
return ((natural_light - 50) / 8);
}
// function to normalize values: inputs are scaled for the sigmoid activation
private double normalizeActivityPlanBright(int activity) {
return (activity - 2) * 1;
}
// function to normalize values: inputs are scaled for the sigmoid activation
private double normalizeNatural_lightPlanBright(double natural_light) {
return ((natural_light - 50) / 8);
}
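// Worked examples (illustrative): normalizeActivityPlanBright(2) -> 0,
// normalizeNatural_lightPlanBright(50) -> 0, normalizeNatural_lightPlanBright(100) -> 6.25;
// i.e. inputs are shifted to be roughly centered on zero for the sigmoid layers.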
///////////////////////////////// train methods/////////////////////////////////////////////////////////////////
// train model for bright prediction
@Override
public void trainBright(State state, double ideal_bright) {
double[][] IDEAL = new double[1][1];
// scale for the NN
IDEAL[0][0] = ideal_bright / 100;
// normalize values to use sigmod functions
double activity_lamp = this.normalizeActivityPlanBright(state.getActivity());
double natural_light = this.normalizeNatural_lightPlanBright(state.getNatural_light());
double[][] INPUT = new double[1][2];
INPUT[0][0] = activity_lamp;
INPUT[0][1] = natural_light;
// training using backpropagation
MLDataSet trainingSet = new BasicMLDataSet(INPUT, IDEAL);
// learning rate of 3.5 and a momentum of 0.3
MLTrain train = new Backpropagation(this.Bright_network, trainingSet, 3.5, 0.3);
int epoch = 1;
do {
train.iteration();
// System.out.println("Epoch #" + epoch + "Error:" + train.getError());
epoch++;
} while (train.getError() > 0.005);
train.finishTraining();
// test the neural network
// System.out.println("Bright Neural Network Results:");
/*
* for (MLDataPair pair : trainingSet) {
* final MLData output = this.Bright_network.compute(pair.getInput());
* System.out.println(
* "INPUT= [" + pair.getInput().getData(0) + ", " + pair.getInput().getData(1) + "], actual= ["
* + output.getData(0) * 100 + "] ,ideal= [" + pair.getIdeal().getData(0) * 100 + "]");
* }
*/
}
@Override
public void trainRGB(State state, double[] user_lamps_values) {
double[][] IDEAL = new double[1][3];
// from RGB to %R %G %B
for (int i = 0; i < this.N_VALUES; i++) {
IDEAL[0][i] = user_lamps_values[i] / 255;
}
double activity_lamp = this.normalizeActivityPlanRGB(state.getActivity());
double natural_light = this.normalizeNatural_lightPlanRGB(state.getNatural_light());
double[][] INPUT = new double[1][2];
INPUT[0][0] = activity_lamp;
INPUT[0][1] = natural_light;
// training using backpropagation
MLDataSet trainingSet = new BasicMLDataSet(INPUT, IDEAL);
// learning rate of 3.5 and a momentum of 0.3
MLTrain train = new Backpropagation(this.RGB_network, trainingSet, 3.5, 0.3);
int epoch = 1;
do {
train.iteration();
System.out.println("Epoch #" + epoch + "Error:" + train.getError());
epoch++;
} while (train.getError() > 0.005);
train.finishTraining();
// test the neural network
/*
* System.out.println("RGB Neural Network Results:");
* for (MLDataPair pair : trainingSet) {
* final MLData output = this.RGB_network.compute(pair.getInput());
* System.out.println("INPUT= [" + pair.getInput().getData(0) + ", " + pair.getInput().getData(1)
* + "], actual= [" + output.getData(0) * 255 + ", " + output.getData(1) * 255 + ", "
* + output.getData(2) * 255 + "] ,ideal= [" + pair.getIdeal().getData(0) * 255 + ", "
* + pair.getIdeal().getData(1) * 255 + ", " + pair.getIdeal().getData(2) * 255 + "]");
* }
*/
}
///////////////////////////////////////////// exploitation methods /////////////////////////////////////////////
// exploitation method for RGB values
@Override
public double[] getPlanRGB(State state) {
double[] output = new double[this.N_VALUES];
// input must be an array, network.compute(input,output)...
double[] input = new double[2];
input[0] = this.normalizeActivityPlanRGB(state.getActivity());
input[1] = this.normalizeNatural_lightPlanRGB(state.getNatural_light());
// get output
this.RGB_network.compute(input, output);
// to RGB
for (int i = 0; i < this.N_VALUES; i++) {
output[i] = output[i] * 255;
}
return output;
}
// exploitation method for Bright values
@Override
public double getPlanBright(State state) {
double aux[] = new double[1];
double output;
// input must be an array, network.compute(input,output)...
double[] input = new double[2];
input[0] = this.normalizeActivityPlanBright(state.getActivity());
input[1] = this.normalizeNatural_lightPlanBright(state.getNatural_light());
// get output
this.Bright_network.compute(input, aux);
// to bright
output = validateBrightValue(100 * aux[0]);
return output;
}
}
package org.openlicht.action.reinforcementlearning.internal;
import java.util.Random;
/* This class defines an RL agent */
public class RLAgent {
private Random rnd = new Random();
// exploration probability (epsilon); 0 means the agent never explores and always exploits
private static final double EPSILON = 0;
// variables that define our environment
// n_lamps: n° lamps
// n_values: n° values for each lamp
private int N_LAMPS;
private int N_VALUES;
private MultilayerPerceptron_Model model;
// method to save the model
public void saveModel() {
this.model.saveModel();
}
// constructor without initialized NN files
public RLAgent(int n_lamps, int n_values) {
rnd.setSeed(System.currentTimeMillis());
this.N_LAMPS = n_lamps;
this.N_VALUES = n_values;
model = new MultilayerPerceptron_Model(this.N_LAMPS, this.N_VALUES);
}
// constructor with initialized NN files
public RLAgent(int n_lamps, int n_values, String file_NNrgb, String file_NNbright) {
rnd.setSeed(System.currentTimeMillis());
this.N_LAMPS = n_lamps;
this.N_VALUES = n_values;
model = new MultilayerPerceptron_Model(this.N_LAMPS, this.N_VALUES, file_NNrgb, file_NNbright);
}
// method called from the ReinforcementLearningAlgorithm class to train the agent
// state: the current state
// ideal_bright: the ideal brightness value
public void trainingAlgorithmBright(State state, double ideal_bright) {
// TRAIN THE SYSTEM
model.trainBright(state, ideal_bright);
}
// method called from the ReinforcementLearningAlgorithm class to train the agent
// state: the current state
// user_lamps_values: the lamp values chosen by the user
public void trainingAlgorithmRGB(State state, double[] user_lamps_values) {
// TRAIN THE SYSTEM
model.trainRGB(state, user_lamps_values);
}
// method called from the ReinforcementLearningAlgorithm class to get a configuration for the lamps (RGB)
public double[] getPlanRGB(State state) {
if (decide_if_exploration()) {// do exploration
// not implemented
return null;
} else {// do exploitation
return model.getPlanRGB(state);
}
}
// method called from the ReinforcementLearningAlgorithm class to get a brightness value for the lamps
public double getPlanBright(State state) {
if (decide_if_exploration()) {// do exploration
// not implemented
return -1;
} else {// do exploitation
return model.getPlanBright(state);
}
}
// method to decide if we do exploration
private boolean decide_if_exploration() {
boolean exploration = false;
if (rnd.nextFloat() < EPSILON) {
exploration = true;
}
return exploration;
}
}
package org.openlicht.action.reinforcementlearning.internal;
/* This class implements a reinforcement learning algorithm */
public class ReinforcementLearningAlgorithm {
// reinforcement learning agent
private RLAgent agent;
private int N_LAMPS;
private int N_VALUES;
// constructor without initialized NN files
public ReinforcementLearningAlgorithm(int n_lamps, int n_values) {
this.N_LAMPS = n_lamps;
this.N_VALUES = n_values;
agent = new RLAgent(N_LAMPS, N_VALUES);
}
// constructor with initialized NN files
public ReinforcementLearningAlgorithm(int n_lamps, int n_values, String file_NNrgb, String file_NNbright) {
this.N_LAMPS = n_lamps;
this.N_VALUES = n_values;
agent = new RLAgent(N_LAMPS, N_VALUES, file_NNrgb, file_NNbright);
}
// we get the feedback from the user and we train our algorithm with it
public void trainAlgorithmRGB(State state, double[] users_lamps_values) {
this.agent.trainingAlgorithmRGB(state, users_lamps_values);
}
// we get the feedback from the user and we train our algorithm with it
public void trainAlgorithmBright(State state, double ideal_bright) {
this.agent.trainingAlgorithmBright(state, ideal_bright);
}
// get plan (RGB)
public double[] planRGB(State state) {
return this.agent.getPlanRGB(state);
}
// get plan (Bright)
public double planBright(State state) {
return this.agent.getPlanBright(state);
}
// method to save the model
public void saveModel() {
this.agent.saveModel();
}
}
\ No newline at end of file
package org.openlicht.action.reinforcementlearning.internal;
// class to define the state (activity, natural light).
public class State {
// user's activity
private int activity;
private double natural_light;
public State(int activity, double natural_light) {
this.natural_light = natural_light;
this.activity = activity;
}
public int getActivity() {
return this.activity;
}
public double getNatural_light() {
return this.natural_light;
}
}
\ No newline at end of file
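Taken together, the deleted classes can also be exercised without the openHAB action layer; a minimal stand-alone sketch (hypothetical driver class and values, using only the constructors and methods defined above):

// Hypothetical stand-alone driver for the RL classes; not part of this commit.
public class RlSketch {
    public static void main(String[] args) {
        // 3 lamps, 3 values per lamp, freshly initialized networks.
        ReinforcementLearningAlgorithm rl = new ReinforcementLearningAlgorithm(3, 3);
        State s = new State(1, 42.0);      // activity 1, natural light 42 (0-100 scale)
        rl.trainAlgorithmBright(s, 60.0);  // user feedback: ideal brightness 60 %
        double plan = rl.planBright(s);    // exploitation via the brightness network
        System.out.println(plan);          // should now be close to 60
    }
}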
Bundle resources go in here!
\ No newline at end of file
@@ -5,19 +5,13 @@ include 'openhab-mock'
include 'integration'
include ':benchmark'
include ':commons.color'
include ':skywriter-hue-integration'
include ':org.openhab.action.machinelearn'
include ':org.openlicht.action.reinforcementlearning'
include ':stub.org.openhab.core.scriptengine.action'
include ':feedbackloop.analyze'
include ':feedbackloop.plan'
include ':feedbackloop.execute'
include ':feedbackloop.api'
include ':feedbackloop.main'
include ':ml_test'
include ':feedbackloop.monitor'
include ':feedbackloop.learner'
include ':influx_test'
include ':eraser.spark'
include ':eraser.starter'
include ':feedbackloop.learner_backup'
/build/
/bin/
logs/
sourceSets {
main {
java {
srcDir 'src/main/java'
}
}
}
package org.openhab.core.scriptengine.action;
import java.lang.annotation.*;
/**
* Stub to make projects build using ActionDoc.
*
* @author rschoene - Initial contribution
*/
@Target(ElementType.METHOD)
@Inherited
@Retention(RetentionPolicy.RUNTIME)
public @interface ActionDoc {
String text();
String returns() default "";
}
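The deleted action code above also imports a ParamDoc annotation from this stub package; that stub file is not shown in this commit view. A companion stub analogous to ActionDoc would look roughly like this (a sketch, not the file from the repository):

package org.openhab.core.scriptengine.action;

import java.lang.annotation.*;

/**
 * Hypothetical companion stub mirroring the ActionDoc stub above; the actual
 * ParamDoc file is not part of this commit view.
 */
@Target(ElementType.PARAMETER)
@Inherited
@Retention(RetentionPolicy.RUNTIME)
public @interface ParamDoc {
    String name();
    String text() default "";
}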