using Flux

# Learnable scaling layer: multiplies its input by n*a, where `a` is the trainable
# adaptive slope and `n` is a fixed scale factor.
struct AdaptiveActivation{T}
  a::T
  n::T
end

Flux.@functor AdaptiveActivation

(fn::AdaptiveActivation)(x) = (fn.n * fn.a) .* x # to be worked on (for weight tying)

# Wraps a fixed elementwise nonlinearity σ so it can be composed after the
# adaptive scaling layer.
struct NonlinearActivation{T}
  σ::T
end

Flux.@functor NonlinearActivation

(a::NonlinearActivation)(x) = (a.σ).(x)
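
# Illustrative usage (an assumption, not part of this commit): with n = 10 and
# a = 1/10 the adaptive scaling starts out as the identity, and the fixed
# nonlinearity is applied right after it, e.g.
#
#   layer = Flux.Chain(Dense(2, 8), AdaptiveActivation(0.1, 10.0), NonlinearActivation(tanh))
#   layer(rand(Float32, 2))   # 8-element output; tanh applied to the scaled pre-activations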
function AdaptiveActivationFeedForwardNetwork(N::Integer, in::Integer, out::Integer,
                                              n::Integer, σ = identity;
                                              nn_param_init = glorot_uniform)
  # another parameter would be the type of adaptive fn to be used
  # N = no. of hidden layers

  a = 1 / n # initial a scaled such that n*a = 1 ?
  function slope_recovery_loss_func(phi, θ, p)
    # calculate the slope-recovery loss here as a function of the θ parameters
    # that are generated for this network
    regularizer_loss = 0.0
    for i in 1:length(θ)
      # the loss:
      #   if adaptive_fn_without_slope_recovery -> 0
      #   elseif with_slope_recovery_layerwise  -> ...
      #   elseif neuronwise                     -> ...
      #   else -> error
    end
    return regularizer_loss
  end

  layer = Flux.Chain(
    Dense(in, out, identity; bias = true, init = nn_param_init),
    AdaptiveActivation(a, oftype(a, n)), # fields are (a, n); convert n so both share one type
    NonlinearActivation(σ),
  ) # to be stacked for as many hidden layers specified (N)

  return (network = Flux.Chain(...), loss_func = slope_recovery_loss_func)
end
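
# ---------------------------------------------------------------------------
# Minimal sketch (an assumption, not part of this commit) of how the per-layer
# block above could be stacked N times, and what a layerwise slope-recovery
# term might look like. The hidden width `hidden`, the helper names
# `build_network` and `layerwise_slope_recovery`, and the exp-based regularizer
# are illustrative choices, not an API defined by this code.
using Statistics: mean

function build_network(N, in, out, hidden, n, σ; init = Flux.glorot_uniform)
  a0 = 1 / n
  block(i, o) = Flux.Chain(
    Dense(i, o, identity; bias = true, init = init),
    AdaptiveActivation(a0, oftype(a0, n)),
    NonlinearActivation(σ),
  )
  layers = Any[block(in, hidden)]                 # input -> first hidden layer
  for _ in 2:N
    push!(layers, block(hidden, hidden))          # remaining hidden layers
  end
  push!(layers, Dense(hidden, out; init = init))  # linear output layer
  return Flux.Chain(layers...)
end

# One possible layerwise choice: penalize 1 / mean(exp(a_k)) over the adaptive
# slopes a_k so that training favors larger slopes (steeper activations).
layerwise_slope_recovery(as) = 1 / mean(exp.(as))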