#include <string>
#include <deque>
#include <algorithm>
#include <yarp/math/Math.h>
#include <yarp/math/Rand.h>
#include <IpIpoptApplication.hpp>

#define CAST_IPOPTAPP(x) (static_cast<Ipopt::IpoptApplication*>(x))

using namespace std;
using namespace yarp::os;
using namespace yarp::sig;
using namespace yarp::math;
// NLP wrapper around the network training problem, handed to IPOPT
class ff2LayNNTrainNLP : public Ipopt::TNLP
{
protected:
    // training data (references held by the NLP)
    const deque<Vector> &in;
    const deque<Vector> &out;
    // read the (min max) pair stored under the given tag in the bounds Property
    bool getBounds(const string &tag, double &min, double &max)
    {
        if (Bottle *b=bounds.find(tag).asList())
        {
            min=b->get(0).asFloat64();
            max=b->get(1).asFloat64();
            return true;
        }

        return false;
    }
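    /* The bounds Property is expected to map each parameter group to a (min max)
       list: for instance a pair like "IW (-10.0 10.0)" makes getBounds("IW",...)
       return min=-10 and max=10 for all input-layer weights (the values here are
       just an illustration; see setBounds() in the class interface). */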
    // copy the solver variables x into the network parameters, following
    // the fixed layout: IW, then LW, then b1, then b2
    void fillNet(const Ipopt::Number *x)
    {
        Ipopt::Index k=0;
        for (size_t i=0; i<IW.size(); i++)
            for (size_t j=0; j<IW.front().length(); j++, k++)
                IW[i][j]=x[k];

        for (size_t i=0; i<LW.size(); i++)
            for (size_t j=0; j<LW.front().length(); j++, k++)
                LW[i][j]=x[k];

        for (size_t i=0; i<b1.length(); i++, k++)
            b1[i]=x[k];

        for (size_t i=0; i<b2.length(); i++, k++)
            b2[i]=x[k];
    }
    // fill the solver variables from the current network parameters,
    // clamping each value within its [x_l,x_u] box bounds
    void fillVar(Ipopt::Number *x, const Ipopt::Number *x_l, const Ipopt::Number *x_u)
    {
        Ipopt::Index k=0;
        for (size_t i=0; i<IW.size(); i++)
            for (size_t j=0; j<IW.front().length(); j++, k++)
                x[k]=std::min(x_u[k],std::max(IW[i][j],x_l[k]));

        for (size_t i=0; i<LW.size(); i++)
            for (size_t j=0; j<LW.front().length(); j++, k++)
                x[k]=std::min(x_u[k],std::max(LW[i][j],x_l[k]));

        for (size_t i=0; i<b1.length(); i++, k++)
            x[k]=std::min(x_u[k],std::max(b1[i],x_l[k]));

        for (size_t i=0; i<b2.length(); i++, k++)
            x[k]=std::min(x_u[k],std::max(b2[i],x_l[k]));
    }
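    /* fillVar() and fillNet() are mirror operations: the former serializes the current
       weights into the flat IPOPT variable vector (clamped into the box bounds), the
       latter writes an IPOPT iterate back into the network. As a quick illustration of
       the clamping (values below are made up): with x_l[k]=-5, x_u[k]=5 and a stored
       weight of 7.3, std::min(x_u[k],std::max(7.3,x_l[k])) yields 5, i.e. the starting
       point is projected onto the feasible box. */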
    ff2LayNNTrainNLP(ff2LayNNTrain &_net, const Property &_bounds,
                     const bool _randomInit, const deque<Vector> &_in,
                     const deque<Vector> &_out, deque<Vector> &_pred) :
                     net(_net), bounds(_bounds), randomInit(_randomInit),
                     in(_in), out(_out), pred(_pred),
                     IW(_net.get_IW()), LW(_net.get_LW()),
                     b1(_net.get_b1()), b2(_net.get_b2())
    // problem size: one variable per network parameter, no constraints
    bool get_nlp_info(Ipopt::Index &n, Ipopt::Index &m, Ipopt::Index &nnz_jac_g,
                      Ipopt::Index &nnz_h_lag, IndexStyleEnum &index_style)
    {
        n=IW.size()*IW.front().length()+LW.size()*LW.front().length()+
          b1.length()+b2.length();
        m=nnz_jac_g=nnz_h_lag=0;
        index_style=TNLP::C_STYLE;

        return true;
    }
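    /* For instance (illustrative sizes, not taken from this file): with 3 inputs,
       5 hidden neurons and 2 outputs, IW holds 5 vectors of length 3, LW holds
       2 vectors of length 5, b1 has length 5 and b2 has length 2, so
       n = 5*3 + 2*5 + 5 + 2 = 32 free variables and m = 0 constraints. */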
    bool get_bounds_info(Ipopt::Index n, Ipopt::Number *x_l, Ipopt::Number *x_u,
                         Ipopt::Index m, Ipopt::Number *g_l, Ipopt::Number *g_u)
    {
        double min_IW,max_IW; getBounds("IW",min_IW,max_IW);
        double min_LW,max_LW; getBounds("LW",min_LW,max_LW);
        double min_b1,max_b1; getBounds("b1",min_b1,max_b1);
        double min_b2,max_b2; getBounds("b2",min_b2,max_b2);
        // apply the per-group bounds to the corresponding variables
        Ipopt::Index k=0;
        for (size_t i=0; i<IW.size(); i++)
            for (size_t j=0; j<IW.front().length(); j++, k++)
            {
                x_l[k]=min_IW;
                x_u[k]=max_IW;
            }

        for (size_t i=0; i<LW.size(); i++)
            for (size_t j=0; j<LW.front().length(); j++, k++)
            {
                x_l[k]=min_LW;
                x_u[k]=max_LW;
            }

        for (size_t i=0; i<b1.size(); i++, k++)
        {
            x_l[k]=min_b1;
            x_u[k]=max_b1;
        }

        for (size_t i=0; i<b2.size(); i++, k++)
        {
            x_l[k]=min_b2;
            x_u[k]=max_b2;
        }

        return true;
    }
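    /* Note that get_bounds_info() above only fills the variable box bounds: since
       m=0 there are no constraint bounds (g_l/g_u), so the only limits in the
       problem are the per-group boxes on the weights and biases read from the
       user-supplied Property. */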
    bool get_starting_point(Ipopt::Index n, bool init_x, Ipopt::Number *x,
                            bool init_z, Ipopt::Number *z_L, Ipopt::Number *z_U,
                            Ipopt::Index m, bool init_lambda, Ipopt::Number *lambda)
    {
        Ipopt::Number *x_l=new Ipopt::Number[n];
        Ipopt::Number *x_u=new Ipopt::Number[n];
        Ipopt::Number *g_l=new Ipopt::Number[n];
        Ipopt::Number *g_u=new Ipopt::Number[n];
        get_bounds_info(n,x_l,x_u,m,g_l,g_u);

        // either draw a random starting point within the bounds or
        // warm-start from the network's current weights
        if (randomInit)
            for (Ipopt::Index i=0; i<n; i++)
                x[i]=Rand::scalar(x_l[i],x_u[i]);
        else
            fillVar(x,x_l,x_u);

        delete[] x_l;
        delete[] x_u;
        delete[] g_l;
        delete[] g_u;

        return true;
    }
    bool eval_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x,
                Ipopt::Number &obj_value)
    {
        fillNet(x);

        // average prediction error over the training set
        // (loop body reconstructed from context)
        obj_value=0.0;
        for (size_t i=0; i<in.size(); i++)
            obj_value+=norm2(out[i]-net.predict(in[i]));
        obj_value/=in.size();

        return true;
    }
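    /* In other words, assuming the reconstruction above, IPOPT minimizes the mean
       squared prediction error

           f(x) = (1/N) * sum_{i=1..N} || out_i - net(in_i) ||^2

       over the stacked parameter vector x = [IW; LW; b1; b2] laid out by
       fillVar()/fillNet(), with the Hessian handled by IPOPT's limited-memory
       approximation (see the solver options further below). */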
    bool eval_grad_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x,
                     Ipopt::Number *grad_f)
    bool eval_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x,
                Ipopt::Index m, Ipopt::Number *g)
    bool eval_jac_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x,
                    Ipopt::Index m, Ipopt::Index nele_jac, Ipopt::Index *iRow,
                    Ipopt::Index *jCol, Ipopt::Number *values)
    bool eval_h(Ipopt::Index n, const Ipopt::Number *x, bool new_x,
                Ipopt::Number obj_factor, Ipopt::Index m, const Ipopt::Number *lambda,
                bool new_lambda, Ipopt::Index nele_hess, Ipopt::Index *iRow,
                Ipopt::Index *jCol, Ipopt::Number *values)
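    /* Since the problem has no constraints (m=0) and the solver is configured further
       below with a limited-memory Hessian approximation, eval_jac_g has nothing to
       fill in and eval_h is never queried by IPOPT; only eval_f and the objective
       gradient drive the optimization. */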
    void finalize_solution(Ipopt::SolverReturn status, Ipopt::Index n,
                           const Ipopt::Number *x, const Ipopt::Number *z_L,
                           const Ipopt::Number *z_U, Ipopt::Index m,
                           const Ipopt::Number *g, const Ipopt::Number *lambda,
                           Ipopt::Number obj_value, const Ipopt::IpoptData *ip_data,
                           Ipopt::IpoptCalculatedQuantities *ip_cq)
    {
        // commit the optimized parameters to the network, then store the final
        // predictions and the residual error (loop body reconstructed from context)
        fillNet(x);

        error=0.0;
        for (size_t i=0; i<in.size(); i++)
        {
            Vector pred=net.predict(in[i]);
            this->pred.push_back(pred);
            error+=norm2(out[i]-pred);
        }
        error/=in.size();
    }
ff2LayNNTrain::ff2LayNNTrain()
{
    App=new Ipopt::IpoptApplication();

    // configure IPOPT for a derivative-light problem: gradient-based NLP scaling,
    // a limited-memory (quasi-Newton) Hessian approximation, and finite-difference
    // values for the Jacobian, so no analytic second derivatives are required
    CAST_IPOPTAPP(App)->Options()->SetStringValue("nlp_scaling_method","gradient-based");
    CAST_IPOPTAPP(App)->Options()->SetStringValue("hessian_approximation","limited-memory");
    CAST_IPOPTAPP(App)->Options()->SetStringValue("jacobian_approximation","finite-difference-values");
bool ff2LayNNTrain::train(const unsigned int numHiddenNodes,
                          const deque<Vector> &in, const deque<Vector> &out,
                          deque<Vector> &pred, double &error)
{
    if ((in.size()==0) || (in.size()!=out.size()))
        return false;

    // first layer: one weight vector and one bias per hidden neuron
    IW.assign(numHiddenNodes,Vector(in.front().length(),0.0));
    b1.resize(numHiddenNodes,0.0);

    // second layer: one weight vector and one bias per output component
    LW.assign(out.front().length(),Vector(numHiddenNodes,0.0));
    b2.resize(out.front().length(),0.0);
    // seed the input ranges with the first sample and map each component
    // onto the canonical [-1,1] interval used internally by the network
    const Vector &in_front=in.front();
    for (size_t i=0; i<in_front.length(); i++)
    {
        minmax range;
        range.min=range.max=in_front[i];
        inMinMaxX.push_back(range);

        range.min=-1.0; range.max=1.0;
        inMinMaxY.push_back(range);
    }

    // enlarge the input ranges over the remaining samples
    // (loop body reconstructed from context)
    for (size_t i=1; i<in.size(); i++)
        for (size_t j=0; j<in_front.length(); j++)
        {
            inMinMaxX[j].min=std::min(inMinMaxX[j].min,in[i][j]);
            inMinMaxX[j].max=std::max(inMinMaxX[j].max,in[i][j]);
        }
    // guard against zero-width input components (body reconstructed as an assumption)
    for (size_t i=0; i<inMinMaxX.size(); i++)
        if (inMinMaxX[i].min==inMinMaxX[i].max)
            inMinMaxX[i].max=inMinMaxX[i].min+1.0;
    // repeat the same procedure for the output ranges
    const Vector &out_front=out.front();
    for (size_t i=0; i<out_front.length(); i++)
    {
        minmax range;
        range.min=range.max=out_front[i];
        outMinMaxX.push_back(range);

        range.min=-1.0; range.max=1.0;
        outMinMaxY.push_back(range);
    }

    // (loop body reconstructed from context)
    for (size_t i=1; i<out.size(); i++)
        for (size_t j=0; j<out_front.length(); j++)
        {
            outMinMaxX[j].min=std::min(outMinMaxX[j].min,out[i][j]);
            outMinMaxX[j].max=std::max(outMinMaxX[j].max,out[i][j]);
        }
    // hand the problem to IPOPT; the nlp instance below is reconstructed from context
    // (random initialization assumed for a fresh training)
    Ipopt::SmartPtr<ff2LayNNTrainNLP> nlp=new ff2LayNNTrainNLP(*this,bounds,true,in,out,pred);

    Ipopt::ApplicationReturnStatus status=CAST_IPOPTAPP(App)->OptimizeTNLP(GetRawPtr(nlp));
    error=nlp->get_error();

    return (status==Ipopt::Solve_Succeeded);
}
bool ff2LayNNTrain::retrain(const deque<Vector> &in, const deque<Vector> &out,
                            deque<Vector> &pred, double &error)
{
    if ((in.size()==0) || (in.size()!=out.size()) || !configured)
        return false;

    // warm start from the current weights (no random re-initialization assumed)
    Ipopt::SmartPtr<ff2LayNNTrainNLP> nlp=new ff2LayNNTrainNLP(*this,bounds,false,in,out,pred);

    Ipopt::ApplicationReturnStatus status=CAST_IPOPTAPP(App)->OptimizeTNLP(GetRawPtr(nlp));
    error=nlp->get_error();

    return (status==Ipopt::Solve_Succeeded);
}
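/* A minimal usage sketch (illustrative only: headers, namespaces and data loading
   are omitted or assumed, and the numbers are made up):

       ff2LayNNTrain net;
       net.setBounds(Property("(IW (-10.0 10.0)) (LW (-10.0 10.0)) "
                              "(b1 (-5.0 5.0)) (b2 (-5.0 5.0))"));

       deque<Vector> in,out,pred;    // matched input/output training samples
       double error;
       if (net.train(10,in,out,pred,error))              // 10 hidden neurons
           yInfo("training done: residual error=%g",error);

       // later, with fresh data of the same dimensions:
       if (net.retrain(in,out,pred,error))
           yInfo("retraining done: residual error=%g",error);
*/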