Upgrade to Python 3.13 (#11588)

Christian Clauss
2024-09-30 23:01:15 +02:00
committed by GitHub
parent a7bfa22455
commit 0177ae1cd5
35 changed files with 135 additions and 131 deletions
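
Nearly every hunk below follows one pattern: doctest output is matched against the repr of an expression's result, and NumPy 2.x (which Python 3.13 requires, since older NumPy releases do not support it) changed the repr of its scalar types. A minimal sketch of the breakage, assuming NumPy >= 2.0:

>>> import numpy as np
>>> np.float64(0.5)  # NumPy 2.x repr; NumPy 1.x printed just 0.5
np.float64(0.5)
>>> float(np.float64(0.5))  # converting restores the plain value doctests expect
0.5

Hence the float()/bool()/int() wrappers added throughout this commit.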

View File

@@ -26,15 +26,15 @@ class DecisionTree:
>>> tester = DecisionTree()
>>> test_labels = np.array([1,2,3,4,5,6,7,8,9,10])
>>> test_prediction = float(6)
- >>> tester.mean_squared_error(test_labels, test_prediction) == (
+ >>> bool(tester.mean_squared_error(test_labels, test_prediction) == (
... TestDecisionTree.helper_mean_squared_error_test(test_labels,
- ... test_prediction))
+ ... test_prediction)))
True
>>> test_labels = np.array([1,2,3])
>>> test_prediction = float(2)
- >>> tester.mean_squared_error(test_labels, test_prediction) == (
+ >>> bool(tester.mean_squared_error(test_labels, test_prediction) == (
... TestDecisionTree.helper_mean_squared_error_test(test_labels,
- ... test_prediction))
+ ... test_prediction)))
True
"""
if labels.ndim != 1:
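
The bool() wrapper above is needed because comparing a NumPy scalar (here, the value returned by mean_squared_error) yields np.bool_, which no longer prints as a plain True. A sketch, assuming NumPy >= 2.0:

>>> import numpy as np
>>> np.float64(1.0) == 1.0
np.True_
>>> bool(np.float64(1.0) == 1.0)
True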

View File

@@ -28,7 +28,7 @@ def linear_regression_prediction(
input : training data (date, total_user, total_event) in list of float
output : list of total user prediction in float
>>> n = linear_regression_prediction([2,3,4,5], [5,3,4,6], [3,1,2,4], [2,1], [2,2])
- >>> abs(n - 5.0) < 1e-6 # Checking precision because of floating point errors
+ >>> bool(abs(n - 5.0) < 1e-6) # Checking precision because of floating point errors
True
"""
x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
@@ -56,7 +56,7 @@ def sarimax_predictor(train_user: list, train_match: list, test_match: list) ->
)
model_fit = model.fit(disp=False, maxiter=600, method="nm")
result = model_fit.predict(1, len(test_match), exog=[test_match])
- return result[0]
+ return float(result[0])
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
@@ -75,7 +75,7 @@ def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> f
regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
regressor.fit(x_train, train_user)
y_pred = regressor.predict(x_test)
- return y_pred[0]
+ return float(y_pred[0])
def interquartile_range_checker(train_user: list) -> float:
@@ -92,7 +92,7 @@ def interquartile_range_checker(train_user: list) -> float:
q3 = np.percentile(train_user, 75)
iqr = q3 - q1
low_lim = q1 - (iqr * 0.1)
- return low_lim
+ return float(low_lim)
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
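
The return float(...) conversions in this file reflect that indexing a NumPy array yields a NumPy scalar rather than a builtin float, so they also make the declared -> float return types accurate. A sketch, assuming NumPy >= 2.0:

>>> import numpy as np
>>> preds = np.array([5.0, 6.5])
>>> preds[0]
np.float64(5.0)
>>> float(preds[0])
5.0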

View File

@@ -42,7 +42,7 @@ class KNN:
>>> KNN._euclidean_distance(np.array([1, 2, 3]), np.array([1, 8, 11]))
10.0
"""
- return np.linalg.norm(a - b)
+ return float(np.linalg.norm(a - b))
def classify(self, pred_point: np.ndarray[float], k: int = 5) -> str:
"""

View File

@@ -45,7 +45,7 @@ def sigmoid_function(z: float | np.ndarray) -> float | np.ndarray:
@returns: returns value in the range 0 to 1
Examples:
- >>> sigmoid_function(4)
+ >>> float(sigmoid_function(4))
0.9820137900379085
>>> sigmoid_function(np.array([-3, 3]))
array([0.04742587, 0.95257413])
@@ -100,7 +100,7 @@ def cost_function(h: np.ndarray, y: np.ndarray) -> float:
References:
- https://en.wikipedia.org/wiki/Logistic_regression
"""
- return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
+ return float((-y * np.log(h) - (1 - y) * np.log(1 - h)).mean())
def log_likelihood(x, y, weights):
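
This file shows both available fixes: converting at the doctest call site (float(sigmoid_function(4))) and converting inside the function (return float(...) in cost_function). The latter makes an existing -> float annotation truthful; the former leaves the function untouched, which matters for sigmoid_function since it may also return an ndarray. A sketch of the scalar case, assuming NumPy >= 2.0:

>>> import numpy as np
>>> 1 / (1 + np.exp(-4))  # np.exp promotes the result to a NumPy scalar
np.float64(0.9820137900379085)
>>> float(1 / (1 + np.exp(-4)))
0.9820137900379085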

View File

@@ -22,7 +22,7 @@ def binary_cross_entropy(
>>> true_labels = np.array([0, 1, 1, 0, 1])
>>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8])
- >>> binary_cross_entropy(true_labels, predicted_probs)
+ >>> float(binary_cross_entropy(true_labels, predicted_probs))
0.2529995012327421
>>> true_labels = np.array([0, 1, 1, 0, 1])
>>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2])
@@ -68,7 +68,7 @@ def binary_focal_cross_entropy(
>>> true_labels = np.array([0, 1, 1, 0, 1])
>>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8])
- >>> binary_focal_cross_entropy(true_labels, predicted_probs)
+ >>> float(binary_focal_cross_entropy(true_labels, predicted_probs))
0.008257977659239775
>>> true_labels = np.array([0, 1, 1, 0, 1])
>>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2])
@@ -108,7 +108,7 @@ def categorical_cross_entropy(
>>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
>>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]])
- >>> categorical_cross_entropy(true_labels, pred_probs)
+ >>> float(categorical_cross_entropy(true_labels, pred_probs))
0.567395975254385
>>> true_labels = np.array([[1, 0], [0, 1]])
>>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]])
@@ -179,13 +179,13 @@ def categorical_focal_cross_entropy(
>>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
>>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]])
>>> alpha = np.array([0.6, 0.2, 0.7])
- >>> categorical_focal_cross_entropy(true_labels, pred_probs, alpha)
+ >>> float(categorical_focal_cross_entropy(true_labels, pred_probs, alpha))
0.0025966118981496423
>>> true_labels = np.array([[0, 1, 0], [0, 0, 1]])
>>> pred_probs = np.array([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
>>> alpha = np.array([0.25, 0.25, 0.25])
- >>> categorical_focal_cross_entropy(true_labels, pred_probs, alpha)
+ >>> float(categorical_focal_cross_entropy(true_labels, pred_probs, alpha))
0.23315276982014324
>>> true_labels = np.array([[1, 0], [0, 1]])
@@ -265,7 +265,7 @@ def hinge_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float:
>>> true_labels = np.array([-1, 1, 1, -1, 1])
>>> pred = np.array([-4, -0.3, 0.7, 5, 10])
- >>> hinge_loss(true_labels, pred)
+ >>> float(hinge_loss(true_labels, pred))
1.52
>>> true_labels = np.array([-1, 1, 1, -1, 1, 1])
>>> pred = np.array([-4, -0.3, 0.7, 5, 10])
@@ -309,11 +309,11 @@ def huber_loss(y_true: np.ndarray, y_pred: np.ndarray, delta: float) -> float:
>>> true_values = np.array([0.9, 10.0, 2.0, 1.0, 5.2])
>>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
- >>> np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102)
+ >>> bool(np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102))
True
>>> true_labels = np.array([11.0, 21.0, 3.32, 4.0, 5.0])
>>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0])
- >>> np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164)
+ >>> bool(np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164))
True
>>> true_labels = np.array([11.0, 21.0, 3.32, 4.0])
>>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0])
@@ -347,7 +347,7 @@ def mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:
>>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
- >>> np.isclose(mean_squared_error(true_values, predicted_values), 0.028)
+ >>> bool(np.isclose(mean_squared_error(true_values, predicted_values), 0.028))
True
>>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2])
@@ -381,11 +381,11 @@ def mean_absolute_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:
>>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
- >>> np.isclose(mean_absolute_error(true_values, predicted_values), 0.16)
+ >>> bool(np.isclose(mean_absolute_error(true_values, predicted_values), 0.16))
True
>>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
- >>> np.isclose(mean_absolute_error(true_values, predicted_values), 2.16)
+ >>> bool(np.isclose(mean_absolute_error(true_values, predicted_values), 2.16))
False
>>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_probs = np.array([0.3, 0.8, 0.9, 5.2])
@@ -420,7 +420,7 @@ def mean_squared_logarithmic_error(y_true: np.ndarray, y_pred: np.ndarray) -> fl
>>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2])
- >>> mean_squared_logarithmic_error(true_values, predicted_values)
+ >>> float(mean_squared_logarithmic_error(true_values, predicted_values))
0.0030860877925181344
>>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
>>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2])
@@ -459,17 +459,17 @@ def mean_absolute_percentage_error(
Examples:
>>> y_true = np.array([10, 20, 30, 40])
>>> y_pred = np.array([12, 18, 33, 45])
- >>> mean_absolute_percentage_error(y_true, y_pred)
+ >>> float(mean_absolute_percentage_error(y_true, y_pred))
0.13125
>>> y_true = np.array([1, 2, 3, 4])
>>> y_pred = np.array([2, 3, 4, 5])
- >>> mean_absolute_percentage_error(y_true, y_pred)
+ >>> float(mean_absolute_percentage_error(y_true, y_pred))
0.5208333333333333
>>> y_true = np.array([34, 37, 44, 47, 48, 48, 46, 43, 32, 27, 26, 24])
>>> y_pred = np.array([37, 40, 46, 44, 46, 50, 45, 44, 34, 30, 22, 23])
- >>> mean_absolute_percentage_error(y_true, y_pred)
+ >>> float(mean_absolute_percentage_error(y_true, y_pred))
0.064671076436071
"""
if len(y_true) != len(y_pred):
@@ -511,7 +511,7 @@ def perplexity_loss(
... [[0.03, 0.26, 0.21, 0.18, 0.30],
... [0.28, 0.10, 0.33, 0.15, 0.12]]]
... )
- >>> perplexity_loss(y_true, y_pred)
+ >>> float(perplexity_loss(y_true, y_pred))
5.0247347775367945
>>> y_true = np.array([[1, 4], [2, 3]])
>>> y_pred = np.array(
@@ -600,17 +600,17 @@ def smooth_l1_loss(y_true: np.ndarray, y_pred: np.ndarray, beta: float = 1.0) ->
>>> y_true = np.array([3, 5, 2, 7])
>>> y_pred = np.array([2.9, 4.8, 2.1, 7.2])
- >>> smooth_l1_loss(y_true, y_pred, 1.0)
+ >>> float(smooth_l1_loss(y_true, y_pred, 1.0))
0.012500000000000022
>>> y_true = np.array([2, 4, 6])
>>> y_pred = np.array([1, 5, 7])
- >>> smooth_l1_loss(y_true, y_pred, 1.0)
+ >>> float(smooth_l1_loss(y_true, y_pred, 1.0))
0.5
>>> y_true = np.array([1, 3, 5, 7])
>>> y_pred = np.array([1, 3, 5, 7])
- >>> smooth_l1_loss(y_true, y_pred, 1.0)
+ >>> float(smooth_l1_loss(y_true, y_pred, 1.0))
0.0
>>> y_true = np.array([1, 3, 5])
@@ -647,7 +647,7 @@ def kullback_leibler_divergence(y_true: np.ndarray, y_pred: np.ndarray) -> float
>>> true_labels = np.array([0.2, 0.3, 0.5])
>>> predicted_probs = np.array([0.3, 0.3, 0.4])
- >>> kullback_leibler_divergence(true_labels, predicted_probs)
+ >>> float(kullback_leibler_divergence(true_labels, predicted_probs))
0.030478754035472025
>>> true_labels = np.array([0.2, 0.3, 0.5])
>>> predicted_probs = np.array([0.3, 0.3, 0.4, 0.5])
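
np.isclose and array reductions such as .mean() return NumPy scalars too, which is why this file needs both call-site wrappers (bool(np.isclose(...)), float(mean_squared_error(...))) and converted return values. A sketch, assuming NumPy >= 2.0:

>>> import numpy as np
>>> np.isclose(0.1 + 0.2, 0.3)
np.True_
>>> bool(np.isclose(0.1 + 0.2, 0.3))
True
>>> float(np.array([1.0, 2.0]).mean())
1.5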

View File

@@ -162,9 +162,9 @@ def normalize(audio: np.ndarray) -> np.ndarray:
Examples:
>>> audio = np.array([1, 2, 3, 4, 5])
>>> normalized_audio = normalize(audio)
- >>> np.max(normalized_audio)
+ >>> float(np.max(normalized_audio))
1.0
- >>> np.min(normalized_audio)
+ >>> float(np.min(normalized_audio))
0.2
"""
# Divide the entire audio signal by the maximum absolute value
@@ -229,7 +229,8 @@ def calculate_fft(audio_windowed: np.ndarray, ftt_size: int = 1024) -> np.ndarra
Examples:
>>> audio_windowed = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
>>> audio_fft = calculate_fft(audio_windowed, ftt_size=4)
- >>> np.allclose(audio_fft[0], np.array([6.0+0.j, -1.5+0.8660254j, -1.5-0.8660254j]))
+ >>> bool(np.allclose(audio_fft[0], np.array([6.0+0.j, -1.5+0.8660254j,
+ ... -1.5-0.8660254j])))
True
"""
# Transpose the audio data to have time in rows and channels in columns
@@ -281,7 +282,7 @@ def freq_to_mel(freq: float) -> float:
The frequency in mel scale.
Examples:
- >>> round(freq_to_mel(1000), 2)
+ >>> float(round(freq_to_mel(1000), 2))
999.99
"""
# Use the formula to convert frequency to the mel scale
@@ -321,7 +322,7 @@ def mel_spaced_filterbank(
Mel-spaced filter bank.
Examples:
- >>> round(mel_spaced_filterbank(8000, 10, 1024)[0][1], 10)
+ >>> float(round(mel_spaced_filterbank(8000, 10, 1024)[0][1], 10))
0.0004603981
"""
freq_min = 0
@@ -438,7 +439,7 @@ def discrete_cosine_transform(dct_filter_num: int, filter_num: int) -> np.ndarra
The DCT basis matrix.
Examples:
- >>> round(discrete_cosine_transform(3, 5)[0][0], 5)
+ >>> float(round(discrete_cosine_transform(3, 5)[0][0], 5))
0.44721
"""
basis = np.empty((dct_filter_num, filter_num))
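
Note that the builtin round() preserves the NumPy scalar type when given an ndigits argument, so even rounded doctest values need a float() wrapper here. A sketch, assuming NumPy >= 2.0:

>>> import numpy as np
>>> round(np.float64(3.14159), 2)
np.float64(3.14)
>>> float(round(np.float64(3.14159), 2))
3.14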

View File

@@ -17,7 +17,7 @@ Y = clf.predict(test)
def wrapper(y):
"""
- >>> wrapper(Y)
+ >>> [int(x) for x in wrapper(Y)]
[0, 0, 1]
"""
return list(y)
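
Here the doctest compares a list, and a list's repr is built from the repr of each element, so a list of NumPy integers fails the same way; converting element-wise restores the expected output. A sketch, assuming NumPy >= 2.0 and a 64-bit default integer:

>>> import numpy as np
>>> list(np.array([0, 0, 1]))
[np.int64(0), np.int64(0), np.int64(1)]
>>> [int(x) for x in np.array([0, 0, 1])]
[0, 0, 1]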

View File

@@ -20,11 +20,11 @@ def mae(predict, actual):
"""
Examples(rounded for precision):
>>> actual = [1,2,3];predict = [1,4,3]
- >>> np.around(mae(predict,actual),decimals = 2)
+ >>> float(np.around(mae(predict,actual),decimals = 2))
0.67
>>> actual = [1,1,1];predict = [1,1,1]
- >>> mae(predict,actual)
+ >>> float(mae(predict,actual))
0.0
"""
predict = np.array(predict)
@@ -41,11 +41,11 @@ def mse(predict, actual):
"""
Examples(rounded for precision):
>>> actual = [1,2,3];predict = [1,4,3]
- >>> np.around(mse(predict,actual),decimals = 2)
+ >>> float(np.around(mse(predict,actual),decimals = 2))
1.33
>>> actual = [1,1,1];predict = [1,1,1]
- >>> mse(predict,actual)
+ >>> float(mse(predict,actual))
0.0
"""
predict = np.array(predict)
@@ -63,11 +63,11 @@ def rmse(predict, actual):
"""
Examples(rounded for precision):
>>> actual = [1,2,3];predict = [1,4,3]
- >>> np.around(rmse(predict,actual),decimals = 2)
+ >>> float(np.around(rmse(predict,actual),decimals = 2))
1.15
>>> actual = [1,1,1];predict = [1,1,1]
- >>> rmse(predict,actual)
+ >>> float(rmse(predict,actual))
0.0
"""
predict = np.array(predict)
@@ -84,12 +84,10 @@ def rmse(predict, actual):
def rmsle(predict, actual):
"""
Examples(rounded for precision):
- >>> actual = [10,10,30];predict = [10,2,30]
- >>> np.around(rmsle(predict,actual),decimals = 2)
+ >>> float(np.around(rmsle(predict=[10, 2, 30], actual=[10, 10, 30]), decimals=2))
0.75
- >>> actual = [1,1,1];predict = [1,1,1]
- >>> rmsle(predict,actual)
+ >>> float(rmsle(predict=[1, 1, 1], actual=[1, 1, 1]))
0.0
"""
predict = np.array(predict)
@@ -117,12 +115,12 @@ def mbd(predict, actual):
Here the model overpredicts
>>> actual = [1,2,3];predict = [2,3,4]
- >>> np.around(mbd(predict,actual),decimals = 2)
+ >>> float(np.around(mbd(predict,actual),decimals = 2))
50.0
Here the model underpredicts
>>> actual = [1,2,3];predict = [0,1,1]
- >>> np.around(mbd(predict,actual),decimals = 2)
+ >>> float(np.around(mbd(predict,actual),decimals = 2))
-66.67
"""
predict = np.array(predict)

View File

@@ -153,7 +153,7 @@ def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
>>> cosine_similarity(np.array([1, 2]), np.array([6, 32]))
0.9615239476408232
"""
- return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
+ return float(np.dot(input_a, input_b) / (norm(input_a) * norm(input_b)))
if __name__ == "__main__":

View File

@@ -14,11 +14,11 @@ def norm_squared(vector: ndarray) -> float:
Returns:
float: squared second norm of vector
- >>> norm_squared([1, 2])
+ >>> int(norm_squared([1, 2]))
5
- >>> norm_squared(np.asarray([1, 2]))
+ >>> int(norm_squared(np.asarray([1, 2])))
5
- >>> norm_squared([0, 0])
+ >>> int(norm_squared([0, 0]))
0
"""
return np.dot(vector, vector)
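
np.dot on integer arrays fits the same pattern: it returns a NumPy integer scalar, so int() restores the plain doctest output. A sketch, assuming NumPy >= 2.0 and a 64-bit default integer:

>>> import numpy as np
>>> np.dot(np.array([1, 2]), np.array([1, 2]))
np.int64(5)
>>> int(np.dot(np.array([1, 2]), np.array([1, 2])))
5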