combinedDeepLearningActiveContour/functions/computeNumericalGradient.m
|
function numgrad = computeNumericalGradient(J, theta)
% numgrad = computeNumericalGradient(J, theta)
% theta: a vector of parameters
% J: a function that outputs a real number. Calling y = J(theta) will
% return the function value at theta.

% Initialize numgrad with zeros
numgrad = zeros(size(theta));

%% ---------- YOUR CODE HERE --------------------------------------
% Instructions:
% Implement numerical gradient checking, and return the result in numgrad.
% (See Section 2.3 of the lecture notes.)
% You should write code so that numgrad(i) is (the numerical approximation
% to) the partial derivative of J with respect to the i-th input argument,
% evaluated at theta. I.e., numgrad(i) should be (approximately) the
% partial derivative of J with respect to theta(i).
%
% Hint: You will probably want to compute the elements of numgrad one at a time.

EPSILON = 1e-4;              % perturbation size for the central difference
N = length(theta);
In = eye(N);                 % columns of the identity are the basis vectors e_i
for k = 1:N
    if mod(k, 100) == 0
        fprintf('%d\n', k);  % progress report every 100 elements
    end
    e_i = In(:, k);
    theta_ip = theta + EPSILON * e_i;
    theta_im = theta - EPSILON * e_i;
    % Central difference:
    %   numgrad(k) ~ (J(theta + EPSILON*e_k) - J(theta - EPSILON*e_k)) / (2*EPSILON)
    numgrad(k) = (J(theta_ip) - J(theta_im)) / (2 * EPSILON);
end

%% ---------------------------------------------------------------
end
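
% Example (a hypothetical sanity check, not part of the original file):
% exercise the routine on a quadratic whose analytic gradient is known.
% For J(theta) = 0.5 * ||theta||^2 the gradient is theta itself, so the
% relative difference below should be on the order of 1e-9 or smaller.
%
%   theta   = randn(5, 1);
%   J       = @(t) 0.5 * (t' * t);   % J(theta) = 0.5 * ||theta||^2
%   numgrad = computeNumericalGradient(J, theta);
%   fprintf('Relative difference: %g\n', ...
%           norm(numgrad - theta) / norm(numgrad + theta));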