Exp 4: Linear Regression, Logistic Regression, and Binary Classification

Linear Regression

from sklearn import datasets
from sklearn import metrics
import numpy as np
import matplotlib.pyplot as plt

# Load the diabetes dataset
disease = datasets.load_diabetes()
#print(disease)
print(disease.keys())

# Use a single feature (column index 2)
disease_X = disease.data[:, np.newaxis, 2]
#print(disease_X)

# Split into training and test sets
disease_X_train = disease_X[:-30]
disease_X_test = disease_X[-20:]
disease_Y_train = disease.target[:-30]
disease_Y_test = disease.target[-20:]

# Fit a linear regression model
from sklearn import linear_model
reg = linear_model.LinearRegression()
reg.fit(disease_X_train, disease_Y_train)

# Predict on the test set and report the error
y_predict = reg.predict(disease_X_test)
accuracy = metrics.mean_squared_error(disease_Y_test, y_predict)  # note: this is the mean squared error, not a classification accuracy
print("accuracy=", accuracy)

# Learned coefficient and intercept
weights = reg.coef_
intercept = reg.intercept_
print(weights, intercept)

# Plot the test points and the fitted line
plt.scatter(disease_X_test, disease_Y_test)
plt.plot(disease_X_test, y_predict)
plt.show()

Output:
dict_keys(['data', 'target', 'frame', 'DESCR', 'feature_names', 'data_filename', 'target_filename', 'data_module'])
accuracy= 2561.3204277283867
[941.43097333] 153.39713623331698

Logistic Regression

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Load the dataset
dataset = pd.read_csv("User_Data.csv")

# input: Age and Estimated Salary (columns 2 and 3)
x = dataset.iloc[:, [2, 3]].values

# output: class label (column 4)
y = dataset.iloc[:, 4].values

# Split into training and test sets
from sklearn.model_selection import train_test_split
xtrain, xtest, ytrain, ytest = train_test_split(x, y, test_size=0.25, random_state=0)

# Standardize the features
from sklearn.preprocessing import StandardScaler
sc_x = StandardScaler()
xtrain = sc_x.fit_transform(xtrain)
xtest = sc_x.transform(xtest)
print(xtrain[0:10, :])

# Fit a logistic regression classifier
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=0)
classifier.fit(xtrain, ytrain)

# Predict on the test set
y_pred = classifier.predict(xtest)

# Evaluate with a confusion matrix and accuracy
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(ytest, y_pred)
print("Confusion Matrix : \n", cm)

from sklearn.metrics import accuracy_score
print("Accuracy : ", accuracy_score(ytest, y_pred))

# Visualize the decision regions on the test set
from matplotlib.colors import ListedColormap
X_set, y_set = xtest, ytest
X1, X2 = np.meshgrid(np.arange(start=X_set[:, 0].min() - 1, stop=X_set[:, 0].max() + 1, step=0.01),
                     np.arange(start=X_set[:, 1].min() - 1, stop=X_set[:, 1].max() + 1, step=0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha=0.75, cmap=ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c=ListedColormap(('red', 'green'))(i), label=j)
plt.title('Classifier (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()

Output:
[[ 0.58164944 -0.88670699]
 [-0.60673761  1.46173768]
 [-0.01254409 -0.5677824 ]
 [-0.60673761  1.89663484]
 [ 1.37390747 -1.40858358]
 [ 1.47293972  0.99784738]
 [ 0.08648817 -0.79972756]
 [-0.01254409 -0.24885782]
 [-0.21060859 -0.5677824 ]
 [-0.21060859 -0.19087153]]
Confusion Matrix : 
 [[65  3]
 [ 8 24]]
Accuracy :  0.89
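The reported accuracy of 0.89 can be cross-checked directly from the confusion matrix: (65 + 24) correct predictions out of (65 + 3 + 8 + 24) = 100 test samples. The short sketch below is not part of the original experiment; it simply redoes that arithmetic and derives precision and recall for class 1, with the `cm` values hard-coded from the output above.

import numpy as np

# Confusion matrix reported above: rows are actual classes, columns are predicted classes
cm = np.array([[65, 3],
               [8, 24]])

# Accuracy = correctly classified samples / all samples
accuracy = np.trace(cm) / cm.sum()   # (65 + 24) / 100 = 0.89

# Precision and recall for the positive class (label 1)
tn, fp, fn, tp = cm.ravel()
precision = tp / (tp + fp)           # 24 / 27 = 0.888...
recall = tp / (tp + fn)              # 24 / 32 = 0.75

print("accuracy =", accuracy)
print("precision =", precision)
print("recall =", recall)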
Binary Classifier

from numpy import where
from collections import Counter
from sklearn.datasets import make_blobs
from matplotlib import pyplot

# define a two-class dataset
X, y = make_blobs(n_samples=1000, centers=2, random_state=1)

# summarize dataset shape
print(X.shape, y.shape)

# summarize observations by class label
counter = Counter(y)
print(counter)

# summarize first few examples
for i in range(10):
    print(X[i], y[i])

# plot the dataset and color the points by class label
for label, _ in counter.items():
    row_ix = where(y == label)[0]
    pyplot.scatter(X[row_ix, 0], X[row_ix, 1], label=str(label))
pyplot.legend()
pyplot.show()

Output:
(1000, 2) (1000,)
Counter({0: 500, 1: 500})
[-3.05837272 4.48825769] 0
[-8.60973869 -3.72714879] 1
[1.37129721 5.23107449] 0
[-9.33917563 -2.9544469 ] 1
[-11.57178593 -3.85275513] 1
[-11.42257341 -4.85679127] 1
[-10.44518578 -3.76476563] 1
[-10.44603561 -4.5772537 ] 1
[-0.61947075 3.48804983] 0
[-10.91115591 -3.26065964] 1
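The third program only generates and plots the two-class dataset; no model is trained on it. As an optional extension, not part of the original experiment, the sketch below fits a logistic regression classifier to the same blobs and evaluates it on a held-out split; since the two clusters are well separated in the scatter plot, the test accuracy should be close to 1.0.

from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

# Recreate the same two-class dataset used above
X, y = make_blobs(n_samples=1000, centers=2, random_state=1)

# Hold out 25% of the samples for testing
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1)

# Fit a logistic regression classifier and evaluate it on the held-out set
model = LogisticRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print("Test accuracy:", accuracy_score(y_test, y_pred))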