Commit f336b9dc authored by Armand S

assignment 1 data and code

Added:
- cs229 notes necessary for assignment 1
- assignment 1 data
- code for logistic regression
parent b077da4b
1.3432504e+00 -1.3311479e+00
1.8205529e+00 -6.3466810e-01
9.8632067e-01 -1.8885762e+00
1.9443734e+00 -1.6354520e+00
9.7673352e-01 -1.3533151e+00
1.9458584e+00 -2.0443278e+00
2.1075153e+00 -2.1256684e+00
2.0703730e+00 -2.4634101e+00
8.6864964e-01 -2.4119348e+00
1.8006594e+00 -2.7739689e+00
3.1283787e+00 -3.4452432e+00
3.0947429e+00 -3.6446145e+00
2.9086652e+00 -4.0065037e+00
2.6770338e+00 -3.0198592e+00
2.7458671e+00 -2.7100561e+00
4.1714647e+00 -3.4622482e+00
3.9313220e+00 -2.1099044e+00
4.3786870e+00 -2.3804743e+00
4.8016565e+00 -3.3803344e+00
4.1661050e+00 -2.8138844e+00
2.4670141e+00 -1.6108444e+00
3.4826743e+00 -1.5533872e+00
3.3652482e+00 -1.8164936e+00
2.8772788e+00 -1.8511689e+00
3.1090444e+00 -1.6384946e+00
2.2183701e+00 7.4279558e-02
1.9949873e+00 1.6268659e-01
2.9500308e+00 1.6873016e-02
2.0216009e+00 1.7227387e-01
2.0486921e+00 -6.3581041e-01
8.7548563e-01 -5.4586168e-01
5.7079941e-01 -3.3278660e-02
1.4266468e+00 -7.5288337e-01
7.2265633e-01 -8.6691930e-01
9.5346198e-01 -1.4896956e+00
4.8333333e+00 7.0175439e-02
4.3070175e+00 1.4152047e+00
6.0321637e+00 4.5029240e-01
5.4181287e+00 -2.7076023e+00
3.4590643e+00 -2.8245614e+00
2.7280702e+00 -9.2397661e-01
1.0029240e+00 7.7192982e-01
3.6637427e+00 -7.7777778e-01
4.3070175e+00 -1.0409357e+00
3.6929825e+00 -1.0526316e-01
5.7397661e+00 -1.6257310e+00
4.9795322e+00 -1.5087719e+00
6.5000000e+00 -2.9122807e+00
5.2426901e+00 9.1812865e-01
1.6754386e+00 5.6725146e-01
5.1708997e+00 1.2103667e+00
4.8795188e+00 1.6081848e+00
4.6649870e+00 1.0695532e+00
4.4934321e+00 1.2351592e+00
4.1512967e+00 8.6721260e-01
3.7177080e+00 1.1517200e+00
3.6224477e+00 1.3106769e+00
3.0606943e+00 1.4857163e+00
7.0718465e+00 -3.4961651e-01
6.0391832e+00 -2.4756832e-01
6.6747480e+00 -1.2484766e-01
6.8461291e+00 2.5977167e-01
6.4270724e+00 -1.4713863e-01
6.8456065e+00 1.4754967e+00
7.7054006e+00 1.6045555e+00
6.2870658e+00 2.4156427e+00
6.9810956e+00 1.2599865e+00
7.0990172e+00 2.2155151e+00
5.5275479e+00 2.9968421e-01
5.8303489e+00 -2.1974408e-01
6.3594527e+00 2.3944217e-01
6.1004524e+00 -4.0957414e-02
5.6237412e+00 3.7135914e-01
5.8836969e+00 2.7768186e+00
5.5781611e+00 3.0682889e+00
7.0050662e+00 -2.5781727e-01
4.4538114e+00 8.3941831e-01
5.6495924e+00 1.3053929e+00
4.6337489e+00 1.9467546e+00
3.6986847e+00 2.2594084e+00
4.1193005e+00 2.5474510e+00
4.7665558e+00 2.7531209e+00
3.0812098e+00 2.7985255e+00
4.0730994e+00 -3.0292398e+00
3.4883041e+00 -1.8888889e+00
7.6900585e-01 1.2105263e+00
1.5000000e+00 3.8128655e+00
5.7982456e+00 -2.0935673e+00
6.8114529e+00 -8.3456730e-01
7.1106096e+00 -1.0201158e+00
7.4941520e+00 -1.7426901e+00
3.1374269e+00 4.2105263e-01
1.6754386e+00 5.0877193e-01
2.4941520e+00 -8.6549708e-01
4.7748538e+00 9.9415205e-02
5.8274854e+00 -6.9005848e-01
2.2894737e+00 1.9707602e+00
2.4941520e+00 1.4152047e+00
2.0847953e+00 1.3567251e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
-1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
1.0000000e+00
import numpy as np
import matplotlib.pyplot as plt

x = np.loadtxt("data/logistic_x.txt")   # input features, shape (m, 2)
y = np.loadtxt("data/logistic_y.txt")   # labels in {-1, +1}, shape (m,)
m = x.shape[0]                          # number of training examples
x = np.c_[np.ones(m), x]                # prepend intercept term to x
y = np.expand_dims(y, axis=1)           # reshape y to (m, 1) for broadcasting
theta = np.zeros((x.shape[1], 1))       # parameter vector theta, shape (3, 1)
def sigmoid_function(z):
    """Numerically stable logistic sigmoid."""
    z = np.clip(z, -500, 500)           # prevent overflow in np.exp
    return 1.0/(1 + np.exp(-z))
def gradient_J(theta, x, y):
    """Gradient of the logistic cost J(theta) = (1/m) sum_i log(1 + exp(-y_i theta^T x_i)):
    grad J = -(1/m) sum_i (1 - sigmoid(y_i theta^T x_i)) y_i x_i
    """
    a = np.multiply(y, x)                                        # y_i x_i, shape (m, 3)
    b = 1 - sigmoid_function(np.multiply(y, np.dot(x, theta)))   # 1 - sigmoid(y_i theta^T x_i), shape (m, 1)
    grad_J = (-1/m)*np.sum(np.multiply(a, b), axis=0)            # shape (3,)
    assert theta.shape[0] == grad_J.shape[0], "gradient dimensions do not match theta"
    return grad_J
def hessian_J(theta, x, y):
    """Hessian of J: H = (1/m) sum_i s_i (1 - s_i) x_i x_i^T, with s_i = sigmoid(y_i theta^T x_i)."""
    a = sigmoid_function(y*np.dot(x, theta))            # s_i, shape (m, 1)
    right_term = a*(1 - a)                              # per-example weight s_i (1 - s_i)
    left_term = np.einsum("ij,ik->ijk", x, x)           # outer products x_i x_i^T, shape (m, 3, 3)
    H = np.einsum("ijk,il->jk", left_term, right_term)  # weighted sum over the m examples
    return H/m
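# Equivalently, the same Hessian can be written with plain matrix products
# instead of einsum. This alternative is a sketch added for comparison, not
# part of the original script; it computes X^T diag(w) X / m and should agree
# with hessian_J above.
def hessian_J_matmul(theta, x, y):
    s = sigmoid_function(y*np.dot(x, theta))   # s_i = sigmoid(y_i theta^T x_i), shape (m, 1)
    w = s*(1 - s)                              # per-example weight, shape (m, 1)
    return np.dot(x.T, w*x)/m                  # w broadcasts over the columns of x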
def rel_error(x, y):
    """Maximum elementwise relative error between x and y."""
    return np.max(np.abs(x - y)/(np.maximum(1e-8, np.abs(x) + np.abs(y))))
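# As a sanity check (an illustrative addition, not part of the original
# assignment code), the analytic gradient can be compared against a centered
# finite-difference estimate of J(theta) = (1/m) sum_i log(1 + exp(-y_i theta^T x_i)).
# The step size eps = 1e-5 is an arbitrary choice.
def numerical_gradient(theta, x, y, eps=1e-5):
    J = lambda t: (1/m)*np.sum(np.log(1 + np.exp(-y*np.dot(x, t))))
    grad = np.zeros(theta.shape[0])
    for j in range(theta.shape[0]):
        d = np.zeros_like(theta)
        d[j] = eps
        grad[j] = (J(theta + d) - J(theta - d))/(2*eps)
    return grad

print("gradient check, rel. error:", rel_error(gradient_J(theta, x, y), numerical_gradient(theta, x, y)))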
def newton(theta, x, y, tolerance=0.01, itercount=False):
    """Newton's method: iterate theta <- theta - H^{-1} grad J until the
    relative change in theta falls below tolerance."""
    niter = 0
    while True:
        G = gradient_J(theta, x, y)
        H = hessian_J(theta, x, y)
        c = np.expand_dims(np.linalg.solve(H, G), axis=1)  # H^{-1} G without forming the explicit inverse
        theta_1 = theta - c
        if rel_error(theta_1, theta) < tolerance:
            break
        theta = theta_1
        niter += 1
    if itercount:
        print("Number of loop iterations:", niter)
    return theta_1   # return the most recent iterate
theta = newton(theta,x,y)
print("parameter vector theta:",theta)
# Plot the training data and the decision boundary fit by logistic regression
plt.title("Training data and decision boundary fit by logistic regression")
plus = np.where(y > 0)[0]    # indices of positive examples
minus = np.where(y < 0)[0]   # indices of negative examples
plt.scatter(x[plus, 1], x[plus, 2], color="r", marker="+", label="y = 1")
plt.scatter(x[minus, 1], x[minus, 2], color="b", marker="_", label="y = -1")
plt.xlabel("x1")
plt.ylabel("x2")
def boundary(theta, x):
    """Decision boundary: theta^T [1, x1, x2] = 0  =>  x2 = -(theta_1/theta_2) x1 - theta_0/theta_2."""
    a = -theta[1]/theta[2]
    b = -theta[0]/theta[2]
    return a*x + b

# Plot the decision boundary over the range of the first feature
min1, max1 = x[:, 1].min() - 1, x[:, 1].max() + 1
xbound = np.arange(min1, max1, 0.1)
ybound = boundary(theta, xbound)
plt.plot(xbound, ybound, label="decision boundary")
plt.legend()
plt.show()