Quantcast
Viewing all articles
Browse latest Browse all 4448

Python implementation of Andrew Ng's Linear Regression exercise

@Iron_man wrote:

import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# Load the dataset and build the matrices for univariate linear regression.
# NOTE(review): the file is named ex1data2.csv but the columns are labelled
# chirps/Temperature — confirm this is the intended dataset.
data = pd.read_csv('ex1data2.csv')
data.columns = ['chirps', 'Temperature']
plt.scatter(data['chirps'], data['Temperature'])

# Design matrix X: a column of ones (intercept term) plus the single feature.
X = pd.DataFrame()
X['Theta0'] = np.ones(len(data))   # bias column
X['Theta1'] = data['chirps']       # feature column
X = np.asmatrix(X)                 # shape (m, 2)

# Target as an (m, 1) column matrix so X * theta.T - y is well-formed.
y = data['Temperature']
y = np.asmatrix(y)
y = y.reshape(len(X), 1)

# Initial parameters (row matrix, shape (1, 2)) and hyperparameters.
theta = np.matrix(np.array([0, 0]))
alpha = 0.01   # learning rate
iters = 1000   # default iteration count (the call below overrides it)
def computeCost(X, y, theta):
    """Return the linear-regression cost J(theta) = sum((X*theta.T - y)^2) / (2m).

    X is an (m, n) design matrix, y an (m, 1) target column matrix, and
    theta a (1, n) row matrix of parameters; m = len(X).
    """
    residual = (X * theta.T) - y          # (m, 1) prediction errors
    squared = np.power(residual, 2)
    return np.sum(squared) / (2 * len(X))
# Sanity check: confirm the matrix shapes line up, then report the cost at
# the all-zero starting parameters.
print(X.shape, theta.shape, y.shape)
initial_cost = computeCost(X, y, theta)
print(initial_cost)
def gradientDescent(X, y, theta, alpha, iters):
    """Run batch gradient descent for linear regression.

    Parameters
    ----------
    X : (m, n) matrix — design matrix whose first column is the bias term.
    y : (m, 1) matrix — targets.
    theta : (1, n) matrix — initial parameters.
    alpha : float — learning rate.
    iters : int — number of iterations.

    Returns
    -------
    (theta, cost) : final (1, n) parameters and the length-`iters` array of
    per-iteration cost values.
    """
    temp = np.matrix(np.zeros(theta.shape))
    parameters = int(theta.ravel().shape[1])
    cost = np.zeros(iters)

    for i in range(iters):
        # Residuals are computed once per iteration, so every partial
        # derivative below uses the same (pre-update) predictions.
        error = (X * theta.T) - y

        for j in range(parameters):
            term = np.multiply(error, X[:, j])
            temp[0, j] = theta[0, j] - ((alpha / len(X)) * np.sum(term))

        theta = temp
        cost[i] = computeCost(X, y, theta)  # track convergence

    return theta, cost

# Fit the model; the keyword argument overrides the module-level iters = 1000
# with 20000 iterations, then print the learned parameters.
g, cost = gradientDescent(X, y, theta, alpha, iters=20000)
print(g)

Posts: 1

Participants: 1

Read full topic


Viewing all articles
Browse latest Browse all 4448

Trending Articles