@@ -812,8 +812,8 @@ def setOptions(self, **opts):
         :key m_tol: relative tolerance for solution `m` for termination of iteration
         :type m_tol: `float`
         :default m_tol: 1e-4
-        :key grad_tol: tolerance for gradient relative to initial costfunction value for termination of iteration
-        :type grad_tol: `float`
+        :key grad_tol: tolerance for gradient relative to initial cost function value for termination of iteration
+        :type grad_tol: `float` or None
         :default grad_tol: 1e-4
         :key truncation: sets the number of previous LBFGS iterations to keep
         :type truncation: `int`
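
With this change `grad_tol` may be set to `None`, which switches off the cost-function-change stopping test in `run()` (see the last hunk below). A minimal usage sketch, assuming `solver` is an instance of the L-BFGS minimizer class providing this `setOptions` method (the instance name is hypothetical):

    # hypothetical minimizer instance exposing the setOptions() shown above
    solver.setOptions(m_tol=1e-5, grad_tol=1e-4, truncation=30)  # keep the F-change test
    solver.setOptions(m_tol=1e-5, grad_tol=None)                 # rely on m_tol only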
@@ -854,7 +854,10 @@ def setOptions(self, **opts):
             elif o == "m_tol":
                 self._m_tol = max(float(opts[o]), EPSILON)
             elif o == "grad_tol":
-                self._grad_tol = max(float(opts[o]), EPSILON)
+                if opts[o] is None:
+                    self._grad_tol = None
+                else:
+                    self._grad_tol = max(float(opts[o]), EPSILON)
             elif o == 'historySize' or o == 'truncation':
                 assert opts[o] > 2, "Truncation must be greater than 2."
                 self._truncation = max(0, int(opts[o]))
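
The new branch keeps a numeric `grad_tol` clamped to at least `EPSILON` and passes `None` through unchanged. A self-contained sketch of that parsing pattern, assuming `EPSILON` is the machine epsilon (the module defines its own constant) and with a helper name made up for illustration:

    import sys

    EPSILON = sys.float_info.epsilon  # assumed stand-in for the module's EPSILON

    def parse_grad_tol(value):
        # None disables the cost-function-change test; floats are clamped to >= EPSILON
        if value is None:
            return None
        return max(float(value), EPSILON)

    assert parse_grad_tol(None) is None
    assert parse_grad_tol(0.0) == EPSILON
    assert parse_grad_tol(1e-4) == 1e-4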
@@ -946,7 +949,8 @@ def run(self, m):
         :rtype: m-type
         """
         assert self._m_tol > 0.
-        assert self._grad_tol > 0.
+        if self._grad_tol is not None:
+            assert self._grad_tol > 0.
         assert self._iterMax > 1
         assert self._relAlphaMin > 0.
         assert self._truncation > 0
@@ -981,7 +985,6 @@ def run(self, m):
         while not converged and not break_down and k < self._restart and iterCount < self._iterMax:
             self.logger.info("********** iteration %3d **********" % iterCount)
             self.logger.info("\tF(m) = %g" % Fm)
-            print(Fm)
             # determine search direction
             p = -self._twoLoop(H_scale, grad_Fm, s_and_y, m, args_m)
             # Now we call the line search with F(m+alpha*p)
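
The search direction `p` comes from the limited-memory two-loop recursion over the stored `(s, y)` pairs. A minimal NumPy sketch of the textbook recursion, not the project's `_twoLoop` (which also threads the cost-function arguments and uses the dual product rather than `np.dot`):

    import numpy as np

    def two_loop(H_scale, grad, s_and_y):
        # s_and_y: list of (s_i, y_i, rho_i) with rho_i = 1 / (y_i . s_i), oldest first
        q = grad.copy()
        alphas = []
        for s, y, rho in reversed(s_and_y):      # first loop: newest to oldest
            a = rho * np.dot(s, q)
            alphas.append(a)
            q -= a * y
        r = H_scale * q                          # apply the initial Hessian scaling
        for (s, y, rho), a in zip(s_and_y, reversed(alphas)):  # second loop: oldest to newest
            b = rho * np.dot(y, r)
            r += (a - b) * s
        return r                                 # approximates H^{-1} grad

The caller then takes `p = -two_loop(H_scale, grad_Fm, s_and_y)` as the descent direction, mirroring the negated call above.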
@@ -1040,16 +1043,17 @@ def run(self, m):
             args_new = self.getCostFunction().getArgumentsAndCount(m_new)
             grad_Fm_new = self.getCostFunction().getGradientAndCount(m_new, *args_new)

-            Ftol_abs = self._grad_tol * abs(max(abs(Fm), abs(Fm_new)))
-            dFm = abs(Fm - Fm_new)
-            flag = dFm <= Ftol_abs
-            if flag:
-                converged = True
-                self.logger.info("F(m) = %g" % Fm_new)
-                self.logger.info("Gradient has converged: |F-Fold|=%g < g_tol*max(|F|,|Fold|)=%g" % (dFm, Ftol_abs))
-                break
-            else:
-                self.logger.info("Gradient checked: |F-Fold|=%g, g_tol*max(|F|,|Fold|)=%g" % (dFm, Ftol_abs))
+            if self._grad_tol is not None:
+                Ftol_abs = self._grad_tol * abs(max(abs(Fm), abs(Fm_new)))
+                dFm = abs(Fm - Fm_new)
+                flag = dFm <= Ftol_abs
+                if flag:
+                    converged = True
+                    self.logger.info("F(m) = %g" % Fm_new)
+                    self.logger.info("Gradient has converged: |F-Fold|=%g < g_tol*max(|F|,|Fold|)=%g" % (dFm, Ftol_abs))
+                    break
+                else:
+                    self.logger.info("Gradient checked: |F-Fold|=%g, g_tol*max(|F|,|Fold|)=%g" % (dFm, Ftol_abs))

             delta_g = grad_Fm_new - grad_Fm
             rho = self.getCostFunction().getDualProductAndCount(delta_m, delta_g)
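
The guarded block is a relative change test on the cost function: convergence is declared when |F - F_old| <= grad_tol * max(|F|, |F_old|), and with `grad_tol=None` the test is skipped so only the `m_tol` criterion remains. A small self-contained sketch of that test (the function name is illustrative only):

    def f_change_converged(Fm_old, Fm_new, grad_tol):
        # mirrors the guarded test above; None disables it
        if grad_tol is None:
            return False
        Ftol_abs = grad_tol * abs(max(abs(Fm_old), abs(Fm_new)))
        return abs(Fm_old - Fm_new) <= Ftol_abs

    assert f_change_converged(100.0, 100.0001, 1e-4)      # relative change ~1e-6
    assert not f_change_converged(100.0, 101.0, 1e-4)     # relative change ~1e-2
    assert not f_change_converged(100.0, 100.0001, None)  # test disabled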