Commit bb95a8e0 authored by René Schöne

Merge branch 'learner-url-load' into 'dev'

Learner url load

See merge request !2
parents 9dd512d9 66d83d8d
Showing 205 additions and 42 deletions
import java.util.*;
import de.tudresden.inf.st.eraser.util.MemberPrinter;
import de.tudresden.inf.st.eraser.util.JavaUtils;
aspect Imports {
......
aspect Location {
syn Optional<Location> Item.myLocation() {
if (this.hasLocation()) {
return Optional.of(this.getLocation());
} else {
return JavaUtils.ifPresentOrElseReturn(linkedThing(),
thing -> thing.hasLocation() ? Optional.of(thing.getLocation()) : Optional.empty(), () -> Optional.empty());
}
}
}
Location ::= <Label:String> SubLocation:Location ;
rel Location.Thing* <-> Thing.Location? ;
rel Location.Item* <-> Item.Location? ;
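Note: the new Location aspect above resolves an Item's location either directly or via its linked Thing, using the project's JavaUtils helpers. Their implementation is not part of this diff; a minimal sketch consistent with the call sites seen here (signatures inferred from usage, not confirmed by the source) could look like this:
import java.util.Optional;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;
public final class JavaUtils {
  private JavaUtils() {}
  // Run action with the value if present, otherwise run emptyAction.
  public static <T> void ifPresentOrElse(Optional<T> optional, Consumer<? super T> action, Runnable emptyAction) {
    if (optional.isPresent()) {
      action.accept(optional.get());
    } else {
      emptyAction.run();
    }
  }
  // Map the value if present, otherwise return the result of the fallback supplier.
  public static <T, R> R ifPresentOrElseReturn(Optional<T> optional, Function<? super T, ? extends R> mapper, Supplier<? extends R> fallback) {
    return optional.isPresent() ? mapper.apply(optional.get()) : fallback.get();
  }
}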
@@ -14,7 +14,7 @@ aspect MachineLearning {
syn Leaf MachineLearningModel.classify();
//--- currentActivityName ---
syn String Root.currentActivityName() = de.tudresden.inf.st.eraser.util.JavaUtils.ifPresentOrElseReturn(
syn String Root.currentActivityName() = JavaUtils.ifPresentOrElseReturn(
currentActivity(),
Activity::getLabel,
() -> "no activity"
@@ -64,7 +64,7 @@ aspect MachineLearning {
public void DummyMachineLearningModel.connectItems(List<String> itemNames) {
logger.info("Storing items to connect");
for (String itemName : itemNames) {
de.tudresden.inf.st.eraser.util.JavaUtils.ifPresentOrElse(getRoot().resolveItem(itemName),
JavaUtils.ifPresentOrElse(getRoot().resolveItem(itemName),
this::addItem,
() -> logger.warn("Could not resolve item '{}'", itemName));
}
@@ -107,7 +107,7 @@ aspect MachineLearning {
//--- ActivityItem ---
@Override
public double ActivityItem.getState() {
return de.tudresden.inf.st.eraser.util.JavaUtils.ifPresentOrElseReturn(
return JavaUtils.ifPresentOrElseReturn(
getRoot().currentActivity(),
activity -> (double) activity.getIdentifier(),
() -> super.getState()
......
@@ -49,6 +49,7 @@ InputNeuron : Neuron ;
rel InputNeuron.Item -> Item ;
HiddenNeuron : Neuron ::= <ActivationFormula:DoubleArrayDoubleFunction> ;
BiasNeuron : HiddenNeuron ;
OutputNeuron : HiddenNeuron ::= <Label:String> ;
DummyMachineLearningModel : MachineLearningModel ::= Current:DecisionTreeLeaf ;
......
@@ -116,10 +116,6 @@ aspect Navigation {
return java.util.Optional.empty();
}
//--- containingChannel ---
inh Channel Link.containingChannel();
eq Channel.getLink().containingChannel() = this;
//--- containingThing ---
inh Thing Channel.containingThing();
eq Thing.getChannel().containingThing() = this;
@@ -128,6 +124,16 @@ aspect Navigation {
inh NeuralNetworkRoot OutputLayer.containingNeuralNetwork();
eq NeuralNetworkRoot.getOutputLayer().containingNeuralNetwork() = this;
//--- linkedThing ---
syn Optional<Thing> Item.linkedThing() {
if (!this.hasChannel()) {
return Optional.empty();
}
Channel channel = this.getChannel();
Thing thing = channel.containingThing();
return Optional.of(thing);
}
//--- getRoot ---
inh Root ASTNode.getRoot();
eq Root.getChannelCategory().getRoot() = this;
......
@@ -57,7 +57,7 @@ aspect NeuralNetwork {
//--- value ---
syn double Neuron.value();
syn double HiddenNeuron.value() {
eq HiddenNeuron.value() {
double[] inputs = new double[getInputs().size()];
for (int i=0; i<inputs.length; ++i) {
NeuronConnection connection = getInputList().get(i);
@@ -69,7 +69,9 @@ aspect NeuralNetwork {
return result;
}
syn double InputNeuron.value() {
eq BiasNeuron.value() = 1;
eq InputNeuron.value() {
return getItem().getStateAsDouble();
}
@@ -98,7 +100,7 @@ aspect NeuralNetwork {
}
String itemName = itemNames.get(i);
InputNeuron neuron = getInputNeuron(i);
de.tudresden.inf.st.eraser.util.JavaUtils.ifPresentOrElse(getRoot().resolveItem(itemName),
JavaUtils.ifPresentOrElse(getRoot().resolveItem(itemName),
neuron::setItem,
() -> logger.warn("Could not resolve item '{}'", itemName));
}
@@ -165,6 +167,12 @@ aspect NeuralNetwork {
return good;
}
@Override
public boolean BiasNeuron.check() {
setActivationFormula(inputs -> 1.0);
return super.check();
}
//--- mlKind ---
inh String OutputLayer.mlKind();
inh String Neuron.mlKind();
......
@@ -160,7 +160,7 @@ aspect Printing {
return new MemberPrinter("Channel")
.addRequired("id", getID())
.addRequired("type", getType(), ChannelType::getID)
.addIds("links", getNumLink(), getLinkList(), Link::getItem)
.addIds("links", getLinkedItems())
.build();
}
......
// ---------------- Main ------------------------------
Root ::= Thing* Group* ThingType* ChannelType* ChannelCategory* ItemCategory* User* MqttRoot InfluxRoot
MachineLearningRoot Rule* ;
MachineLearningRoot Rule* Location* ;
// ---------------- Users ------------------------------
User : LabelledModelElement ;
......
@@ -16,10 +16,9 @@ abstract ChannelCategory ;
DefaultChannelCategory : ChannelCategory ::= <Value:DefaultChannelCategoryValue> ;
SimpleChannelCategory : ChannelCategory ::= <Value:String> ;
Channel : ModelElement ::= Link* ;
Channel : ModelElement ::= ;
rel Channel.Type -> ChannelType ;
Link ::= <Item:Item> ;
rel Channel.LinkedItem* <-> Item.Channel? ;
Parameter : DescribableModelElement ::= <Type:ParameterValueType> [DefaultValue:ParameterDefaultValue] <Context:String> <Required:boolean> ;
ParameterDefaultValue ::= <Value:String> ;
......
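Note: the two hunks above replace the intermediate Link nonterminal with a bidirectional RelAst relation between Channel and Item. A short sketch of how the generated API is used after this change (the channel and item variables are assumed to exist; accessor names are taken from the other hunks in this diff):
// Before this change: items were attached through an extra Link node.
// channel.addLink(new Link(item));
// After this change: the relation connects Channel and Item directly,
// and the opposite end (Item.Channel?) is assumed to be kept consistent by the generated code.
channel.addLinkedItem(item);
Thing owner = item.getChannel().containingThing();  // navigation as used in Item.linkedThing()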
@@ -311,10 +311,8 @@ public class OpenHab2Importer {
private void update(Root model, LinkData[] linkList) {
for (LinkData linkData : linkList) {
Link link = new Link();
ifPresent(model.resolveChannel(linkData.channelUID), "Channel", linkData,
channel -> channel.addLink(link));
ifPresent(model.resolveItem(linkData.itemName), "Item", linkData, link::setItem);
channel -> ifPresent(model.resolveItem(linkData.itemName), "Item", linkData, channel::addLinkedItem));
}
}
......
@@ -73,7 +73,7 @@ public class EraserParserHelper {
this.root.getMqttRoot().ensureCorrectPrefixes();
resolveList(channelMap, missingChannelListMap, Thing::addChannel);
resolveList(itemMap, missingItemLinkListMap, (channel, item) -> channel.addLink(new Link(item)));
resolveList(itemMap, missingItemLinkListMap, Channel::addLinkedItem);
resolveList(groupMap, missingSubGroupListMap, Group::addGroup);
resolveList(itemMap, missingItemListMap, this::addItemToGroup);
resolveList(channelTypeMap, missingChannelTypeListMap, ThingType::addChannelType);
......
@@ -142,7 +142,7 @@
{
"channelUID": "openlicht:polar-m600:342dfc32:rotation-y",
"configuration": {},
"itemName": "watch_acceleration_y"
"itemName": "watch_rotation_y"
},
{
"channelUID": "openlicht:polar-m600:342dfc32:rotation-z",
......
@@ -77,7 +77,7 @@ Channel: id="openlicht:polar-m600:342dfc32:activity" type="openlicht:activity-ty
Channel: id="openlicht:polar-m600:342dfc32:brightness" type="openlicht:brightness-type" links=["polar_brightness"] ;
Channel: id="openlicht:polar-m600:342dfc32:heart-rate" type="openlicht:heart-rate-type" ;
Channel: id="openlicht:polar-m600:342dfc32:rotation-x" type="openlicht:rotation-type" links=["watch_rotation_x"] ;
Channel: id="openlicht:polar-m600:342dfc32:rotation-y" type="openlicht:rotation-type" links=["watch_acceleration_y"] ;
Channel: id="openlicht:polar-m600:342dfc32:rotation-y" type="openlicht:rotation-type" links=["watch_rotation_y"] ;
Channel: id="openlicht:polar-m600:342dfc32:rotation-z" type="openlicht:rotation-type" links=["watch_rotation_z"] ;
Channel: id="openlicht:polar-m600:342dfc32:steps" type="openlicht:steps-type" ;
Channel: id="openlicht:samsung-s6:2ca84896:brightness" type="openlicht:brightness-type" links=["samsung_brightness"] ;
......
@@ -10,6 +10,7 @@ import de.tudresden.inf.st.eraser.feedbackloop.api.Learner;
import de.tudresden.inf.st.eraser.feedbackloop.api.Plan;
import de.tudresden.inf.st.eraser.feedbackloop.api.model.Model;
import de.tudresden.inf.st.eraser.feedbackloop.execute.ExecuteImpl;
import de.tudresden.inf.st.eraser.feedbackloop.learner.LearnerHelper;
import de.tudresden.inf.st.eraser.feedbackloop.learner.LearnerImpl;
import de.tudresden.inf.st.eraser.feedbackloop.plan.PlanImpl;
import de.tudresden.inf.st.eraser.jastadd.model.DummyMachineLearningModel;
@@ -30,8 +31,10 @@ import org.apache.logging.log4j.Logger;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Collections;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
@@ -127,7 +130,18 @@ public class EraserStarter {
logger.info("Reading preference learning from file {}", settings.preference.file);
Learner learner = new LearnerImpl();
// there should be a method to load a model using a URL
Model preference = learner.getTrainedModel(settings.preference.realURL(), settings.preference.id);
boolean loadingSuccessful = false;
try (InputStream input = settings.preference.realURL().openStream()) {
loadingSuccessful = learner.loadModelFromFile(input, settings.preference.id,
Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), Collections.emptyList());
} catch (IOException e) {
logger.catching(e);
loadingSuccessful = false;
}
// Model preference = learner.getTrainedModel(settings.preference.realURL(), settings.preference.id);
logger.debug("Loading of {} was {}", settings.preference.realURL(), loadingSuccessful ? "successful" : "failed");
if (loadingSuccessful) {
Model preference = learner.getTrainedModel(settings.preference.id);
NeuralNetworkRoot neuralNetwork = LearnerHelper.transform(preference);
if (neuralNetwork == null) {
logger.error("Could not create preference model, see possible previous errors.");
@@ -139,6 +153,11 @@ public class EraserStarter {
item -> neuralNetwork.getOutputLayer().setAffectedItem(item),
() -> logger.error("Output item not set from value '{}'", settings.preference.affectedItem));
}
} else {
// loading was not successful
logger.warn("Falling back to dummy preference learning");
model.getMachineLearningRoot().setPreferenceLearning(DummyMachineLearningModel.createDefault());
}
}
model.getMachineLearningRoot().getPreferenceLearning().connectItems(settings.preference.items);
if (!model.getMachineLearningRoot().getActivityRecognition().check()) {
......
@@ -36,7 +36,7 @@ preference:
# File to read in. Expected format = eg
file: preference.eg
# Use dummy model in which the current activity is directly editable. Default: false.
dummy: true
dummy: false
# Model id. Default: 1.
id: 1
# Items to connect to inputs
@@ -45,8 +45,7 @@ preference:
- datetime_day
- datetime_hour
- datetime_minute
- bias
- activity
# - activity
# Item to change with classification result
affectedItem: iris1_item
......
package de.tudresden.inf.st.eraser.feedbackloop.api;
import java.io.File;
import java.io.InputStream;
import java.net.URL;
import java.util.List;
@@ -35,7 +37,7 @@ public interface Learner {
* Method for loading a neural network from a file.
* Please note that the normalizers are not loaded from the file, because it is assumed that the mins and maxes are saved anyway in the metadata of the data sets or items.
*
* @param path - path to the save folder of the model files e.g. C:\models\
* @param file - the file to load the model from
* @param modelID - ID of the BasicNetwork.
* @param inputMaxes - list that contains max values of all input columns (sensors) e.g. light intensity 100
* @param inputMins - list that contains min values of all input columns (sensors) e.g. light intensity 0
@@ -43,7 +45,22 @@ public interface Learner {
* @param targetMins - list that contains min values of all output columns (results) e.g. brightness 0 for preference learning
* @return true - model loading was successful
* */
boolean loadModelFromFile(String path, int modelID, List<Integer> inputMaxes, List<Integer> inputMins, List<Integer> targetMaxes,
boolean loadModelFromFile(File file, int modelID, List<Integer> inputMaxes, List<Integer> inputMins, List<Integer> targetMaxes,
List<Integer> targetMins);
/**
* Method for loading a neural network from an input stream.
* Please note that the normalizers are not loaded from the stream, because it is assumed that the mins and maxes are saved anyway in the metadata of the data sets or items.
*
* @param input - stream to load the model from
* @param modelID - ID of the BasicNetwork.
* @param inputMaxes - list that contains max values of all input columns (sensors) e.g. light intensity 100
* @param inputMins - list that contains min values of all input columns (sensors) e.g. light intensity 0
* @param targetMaxes - list that contains max values of all output columns (results) e.g. brightness 100 for preference learning
* @param targetMins - list that contains min values of all output columns (results) e.g. brightness 0 for preference learning
* @return true - model loading was successful
* */
boolean loadModelFromFile(InputStream input, int modelID, List<Integer> inputMaxes, List<Integer> inputMins, List<Integer> targetMaxes,
List<Integer> targetMins);
/**
@@ -102,6 +119,7 @@ public interface Learner {
* */
Model getTrainedModel(int modelID);
@Deprecated
Model getTrainedModel(URL url, int modelID);
/**
......
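Note: with the new InputStream overload, a model can be loaded from any stream source, e.g. one opened from a URL, while getTrainedModel(URL, int) is deprecated. A minimal caller sketch following the pattern used in EraserStarter above (the URL string and model id are illustrative):
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.util.Collections;
// ...
Learner learner = new LearnerImpl();
int modelID = 1;
boolean loaded = false;
try (InputStream input = new URL("file:preference.eg").openStream()) {
  // min/max lists are left empty here; normalization bounds are expected in the data set metadata
  loaded = learner.loadModelFromFile(input, modelID,
      Collections.emptyList(), Collections.emptyList(),
      Collections.emptyList(), Collections.emptyList());
} catch (IOException e) {
  loaded = false;
}
if (loaded) {
  Model preference = learner.getTrainedModel(modelID);
  // ... transform into a NeuralNetworkRoot and attach it to the knowledge base
}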
package de.tudresden.inf.st.eraser.feedbackloop.api;
import java.time.Instant;
/**
* This interface represents the connection from a machine learning model back to the knowledge base.
* It decodes the output of the machine learning model and outputs the result of the classification.
*
* @author rschoene - Initial contribution
*/
@SuppressWarnings("unused")
public interface MachineLearningDecoder {
/**
* Execute the machine learning model and returns the classification result.
* @return the result of the classification
*/
MachineLearningResult classify();
// less important
/**
* Returns the time when the model was last updated, i.e., when the last training was completed.
* @return the time when the model was last updated, or <code>null</code> if the model was not trained yet
*/
Instant lastModelUpdate();
}
package de.tudresden.inf.st.eraser.feedbackloop.api;
import de.tudresden.inf.st.eraser.jastadd.model.Item;
import de.tudresden.inf.st.eraser.jastadd.model.Root;
import java.util.List;
/**
* This interface represents the connection from knowledge base to one machine learning model.
* It takes information from the knowledge base, and encodes them to a representation that is readable both for
* the used technique and the purpose of the machine learning model.
*
* @author rschoene - Initial contribution
*/
@SuppressWarnings("unused")
public interface MachineLearningEncoder {
/**
* Update when new data is available.
* @param model The underlying model
* @param changedItems A list of items whose state has changed
*/
void newData(Root model, List<Item> changedItems);
// to be discussed, in which form this is specified
/**
* Get the items that this model is supposed to change.
* @return the list of targeted items
*/
List<Item> getTargets();
// to be discussed, in which form this is specified
/**
* Get the items which are relevant for the decision making of this model.
* @return the list of items relevant for decision making
*/
List<Item> getRelevantItems();
// to be discussed, if this is necessary
/**
* Explicit hint for this model to start/trigger training. The model might ignore this hint.
*/
void triggerTraining();
}
package de.tudresden.inf.st.eraser.feedbackloop.api;
import de.tudresden.inf.st.eraser.jastadd.model.ItemPreference;
import java.util.List;
/**
* Representation of a classification result using a MachineLearningModel.
*
* @author rschoene - Initial contribution
*/
@SuppressWarnings("unused")
public interface MachineLearningResult {
// Object rawClass();
// double rawConfidence();
// can be used for both activity and preferences
/**
* Get the result as a list of item preferences, i.e., new states to be set for those items.
* @return the classification result as item preferences
*/
List<ItemPreference> getPreferences();
}
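Note: taken together, MachineLearningEncoder, MachineLearningDecoder and MachineLearningResult describe one round trip of the feedback loop: push changed items into the model, classify, and read back the resulting item preferences. A minimal sketch of that flow (how the returned preferences are applied to the knowledge base is left open, as it is not part of this diff):
package de.tudresden.inf.st.eraser.feedbackloop.api;
import de.tudresden.inf.st.eraser.jastadd.model.Item;
import de.tudresden.inf.st.eraser.jastadd.model.ItemPreference;
import de.tudresden.inf.st.eraser.jastadd.model.Root;
import java.util.List;
class FeedbackRoundTrip {
  // One pass: encode new data, run the classification, return the preferences.
  static List<ItemPreference> step(MachineLearningEncoder encoder, MachineLearningDecoder decoder,
                                   Root model, List<Item> changedItems) {
    encoder.newData(model, changedItems);              // hand the changed knowledge-base state to the model
    MachineLearningResult result = decoder.classify(); // execute the model
    return result.getPreferences();                    // new states to be set for the targeted items
  }
}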