Set the Python file maximum line length to 88 characters (#2122)

* flake8 --max-line-length=88

* fixup! Format Python code with psf/black push

Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com>
Author: Christian Clauss
Date: 2020-06-16 10:09:19 +02:00 (committed by GitHub)
Parent: 9438c6bf0b
Commit: 9316e7c014
90 changed files with 473 additions and 320 deletions
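Most of the hunks below shorten over-long string literals and docstrings. The string case relies on Python's implicit concatenation of adjacent literals, so splitting a long URL across lines leaves its value unchanged; a minimal sketch (the URL is copied from one of the changed files, and the noqa: E501 comment only silences flake8's line-length check on the deliberately long form):

WRAPPED_URL = (
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
# The single-literal form is identical at runtime; only the source layout differs.
UNWRAPPED_URL = "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/position_salaries.csv"  # noqa: E501
assert WRAPPED_URL == UNWRAPPED_URL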

View File

@@ -1,5 +1,6 @@
"""
Implementation of the gradient descent algorithm for minimizing the cost of a linear hypothesis function.
Implementation of the gradient descent algorithm for minimizing the cost of a linear
hypothesis function.
"""
import numpy
@@ -75,7 +76,8 @@ def summation_of_cost_derivative(index, end=m):
:param index: index wrt which the derivative is being calculated
:param end: value where summation ends, default is m, number of examples
:return: Returns the summation of cost derivative
Note: If index is -1, this means we are calculating the summation wrt the bias parameter.
Note: If index is -1, this means we are calculating the summation wrt the bias
parameter.
"""
summation_value = 0
for i in range(end):
@@ -90,7 +92,8 @@ def get_cost_derivative(index):
"""
:param index: index of the parameter vector wrt which the derivative is calculated
:return: derivative wrt that index
Note: If index is -1, this means we are calculating the summation wrt the bias parameter.
Note: If index is -1, this means we are calculating the summation wrt the bias
parameter.
"""
cost_derivative_value = summation_of_cost_derivative(index, m) / m
return cost_derivative_value
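The two docstrings above describe per-parameter cost derivatives where index -1 selects the bias term and the summation is divided by m, the number of examples. A minimal, self-contained sketch of that convention (cost_derivative, theta and the random data are illustrative names, not the file's own):

import numpy as np


def cost_derivative(theta, features, targets, index):
    """Mean derivative of the squared-error cost wrt one parameter.

    index == -1 selects the bias term, mirroring the convention in the
    docstrings above; otherwise index picks a feature column.
    """
    residuals = features @ theta[1:] + theta[0] - targets
    if index == -1:  # bias term: its "feature" is implicitly 1
        return residuals.mean()
    return (residuals * features[:, index]).mean()


rng = np.random.default_rng(0)
features = rng.normal(size=(100, 3))
targets = features @ np.array([2.0, -1.0, 0.5]) + 3.0
theta = np.zeros(4)  # [bias, w1, w2, w3]
for _ in range(500):  # plain gradient descent with a fixed learning rate
    gradient = np.array(
        [cost_derivative(theta, features, targets, j) for j in range(-1, 3)]
    )
    theta -= 0.1 * gradient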

View File

@@ -10,7 +10,8 @@ from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/position_salaries.csv"
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
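For context on how this dataset tends to be used once loaded, here is a hedged sketch of a polynomial regression fit built on the PolynomialFeatures import shown above; the degree and the 6.5 query point are assumptions, not necessarily what the changed file does:

import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values  # position level
y = dataset.iloc[:, 2].values  # salary
poly = PolynomialFeatures(degree=4)  # degree chosen for illustration
model = LinearRegression().fit(poly.fit_transform(X), y)
print(model.predict(poly.transform([[6.5]])))  # predicted salary for level 6.5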

View File

@@ -42,7 +42,10 @@ import pandas as pd
from sklearn.datasets import make_blobs, make_circles
from sklearn.preprocessing import StandardScaler
CANCER_DATASET_URL = "http://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data"
CANCER_DATASET_URL = (
"http://archive.ics.uci.edu/ml/machine-learning-databases/"
"breast-cancer-wisconsin/wdbc.data"
)
class SmoSVM:
@@ -124,7 +127,8 @@ class SmoSVM:
b_old = self._b
self._b = b
# 4: update error value, here we only calculate those non-bound samples' error
# 4: update error value, here we only calculate those non-bound samples'
# error
self._unbound = [i for i in self._all_samples if self._is_unbound(i)]
for s in self.unbound:
if s == i1 or s == i2:
@@ -231,8 +235,10 @@ class SmoSVM:
"""
Choose the first alpha; steps:
1: First, loop over all samples.
2: Second, loop over the non-bound samples until none of them violates the KKT condition.
3: Repeat these two processes until, after the first loop, no sample violates the KKT condition.
2: Second, loop over the non-bound samples until none of them violates the
KKT condition.
3: Repeat these two processes until, after the first loop, no sample violates
the KKT condition.
"""
while True:
all_not_obey = True
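The docstring above describes Platt-style alternation: a full sweep over all samples, then sweeps over only the non-bound samples until none violates the KKT conditions, repeated until a full sweep finds nothing to fix. A skeleton of that outer loop (is_unbound, violates_kkt and optimize_pair are placeholder callables standing in for the class's own checks, not its methods):

def smo_outer_loop(samples, is_unbound, violates_kkt, optimize_pair):
    examine_all = True
    while True:
        candidates = samples if examine_all else [i for i in samples if is_unbound(i)]
        # optimize_pair is assumed to return 1 when it changed an alpha pair, else 0
        changed = sum(optimize_pair(i1) for i1 in candidates if violates_kkt(i1))
        if examine_all:
            if changed == 0:
                return  # a full sweep fixed nothing: every sample satisfies KKT
            examine_all = False  # next sweep: only the non-bound samples
        elif changed == 0:
            examine_all = True  # non-bound sweeps converged; verify with a full sweep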
@@ -352,8 +358,8 @@ class SmoSVM:
)
"""
# way 2
Use the objective function to check which new alpha2 gives the minimal objective value.
Use the objective function to check which new alpha2 gives the minimal
objective value.
"""
if ol < (oh - self._eps):
a2_new = L
@@ -572,11 +578,11 @@ def plot_partition_boundary(
model, train_data, ax, resolution=100, colors=("b", "k", "r")
):
"""
We cannot get the optimum w of our kernel svm model, which is different from linear svm.
For this reason, we generate randomly distributed points with high density, and the predicted values of these points are
calculated by using our trained model. Then we can use these predicted values to draw a contour map.
We cannot get the optimum w of our kernel svm model, which is different from linear
svm. For this reason, we generate randomly distributed points with high density, and
the predicted values of these points are calculated by using our trained model. Then
we can use these predicted values to draw a contour map.
And this contour map can represent the svm's partition boundary.
"""
train_data_x = train_data[:, 1]
train_data_y = train_data[:, 2]
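The docstring above explains the boundary-plotting trick: evaluate the trained model on a dense grid of points and draw a contour map of the predictions. A generic sketch of that idea (model stands in for any fitted classifier with a predict method; it is not this file's SmoSVM instance):

import matplotlib.pyplot as plt
import numpy as np


def plot_decision_regions(model, x, y, resolution=100):
    xx, yy = np.meshgrid(
        np.linspace(x.min() - 1, x.max() + 1, resolution),
        np.linspace(y.min() - 1, y.max() + 1, resolution),
    )
    grid = np.column_stack([xx.ravel(), yy.ravel()])  # every grid point as a sample
    predictions = np.asarray(model.predict(grid)).reshape(xx.shape)
    plt.contourf(xx, yy, predictions, alpha=0.3)  # filled contours mark the regions
    plt.scatter(x, y, s=10)  # overlay the training points
    plt.show()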