diff --git a/.gitignore b/.gitignore index c744bced2..b7c6872d3 100644 --- a/.gitignore +++ b/.gitignore @@ -5,5 +5,3 @@ *.png *.jpg *.gif - -docs-en \ No newline at end of file diff --git a/docs-en/chapter_computational_complexity/index.md b/docs-en/chapter_computational_complexity/index.md new file mode 100644 index 000000000..96b584e45 --- /dev/null +++ b/docs-en/chapter_computational_complexity/index.md @@ -0,0 +1,26 @@ +--- +comments: true +icon: material/timer-sand +--- + +# Chapter 2 Complexity Analysis + +
Figure 2-1 Flow block diagram of the summation function
+ +The number of operations in this summation function is proportional to the size of the input data $n$, or a "linear relationship". In fact, **time complexity describes this "linear relationship"**. This is described in more detail in the next section. + +### 2. While Loop + +Similar to a `for` loop, a `while` loop is a way to implement iteration. In a `while` loop, the program first checks the condition at each turn, and if the condition is true, it continues, otherwise it ends the loop. + +Below, we use a `while` loop to realize the summation $1 + 2 + \dots + n$ . + +=== "Python" + + ```python title="iteration.py" + def while_loop(n: int) -> int: + """while 循环""" + res = 0 + i = 1 # 初始化条件变量 + # 循环求和 1, 2, ..., n-1, n + while i <= n: + res += i + i += 1 # 更新条件变量 + return res + ``` + +=== "C++" + + ```cpp title="iteration.cpp" + /* while 循环 */ + int whileLoop(int n) { + int res = 0; + int i = 1; // 初始化条件变量 + // 循环求和 1, 2, ..., n-1, n + while (i <= n) { + res += i; + i++; // 更新条件变量 + } + return res; + } + ``` + +=== "Java" + + ```java title="iteration.java" + /* while 循环 */ + int whileLoop(int n) { + int res = 0; + int i = 1; // 初始化条件变量 + // 循环求和 1, 2, ..., n-1, n + while (i <= n) { + res += i; + i++; // 更新条件变量 + } + return res; + } + ``` + +=== "C#" + + ```csharp title="iteration.cs" + /* while 循环 */ + int WhileLoop(int n) { + int res = 0; + int i = 1; // 初始化条件变量 + // 循环求和 1, 2, ..., n-1, n + while (i <= n) { + res += i; + i += 1; // 更新条件变量 + } + return res; + } + ``` + +=== "Go" + + ```go title="iteration.go" + /* while 循环 */ + func whileLoop(n int) int { + res := 0 + // 初始化条件变量 + i := 1 + // 循环求和 1, 2, ..., n-1, n + for i <= n { + res += i + // 更新条件变量 + i++ + } + return res + } + ``` + +=== "Swift" + + ```swift title="iteration.swift" + /* while 循环 */ + func whileLoop(n: Int) -> Int { + var res = 0 + var i = 1 // 初始化条件变量 + // 循环求和 1, 2, ..., n-1, n + while i <= n { + res += i + i += 1 // 更新条件变量 + } + return res + } + ``` + +=== "JS" + + ```javascript 
title="iteration.js" + /* while 循环 */ + function whileLoop(n) { + let res = 0; + let i = 1; // 初始化条件变量 + // 循环求和 1, 2, ..., n-1, n + while (i <= n) { + res += i; + i++; // 更新条件变量 + } + return res; + } + ``` + +=== "TS" + + ```typescript title="iteration.ts" + /* while 循环 */ + function whileLoop(n: number): number { + let res = 0; + let i = 1; // 初始化条件变量 + // 循环求和 1, 2, ..., n-1, n + while (i <= n) { + res += i; + i++; // 更新条件变量 + } + return res; + } + ``` + +=== "Dart" + + ```dart title="iteration.dart" + /* while 循环 */ + int whileLoop(int n) { + int res = 0; + int i = 1; // 初始化条件变量 + // 循环求和 1, 2, ..., n-1, n + while (i <= n) { + res += i; + i++; // 更新条件变量 + } + return res; + } + ``` + +=== "Rust" + + ```rust title="iteration.rs" + /* while 循环 */ + fn while_loop(n: i32) -> i32 { + let mut res = 0; + let mut i = 1; // 初始化条件变量 + // 循环求和 1, 2, ..., n-1, n + while i <= n { + res += i; + i += 1; // 更新条件变量 + } + res + } + ``` + +=== "C" + + ```c title="iteration.c" + /* while 循环 */ + int whileLoop(int n) { + int res = 0; + int i = 1; // 初始化条件变量 + // 循环求和 1, 2, ..., n-1, n + while (i <= n) { + res += i; + i++; // 更新条件变量 + } + return res; + } + ``` + +=== "Zig" + + ```zig title="iteration.zig" + // while 循环 + fn whileLoop(n: i32) i32 { + var res: i32 = 0; + var i: i32 = 1; // 初始化条件变量 + // 循环求和 1, 2, ..., n-1, n + while (i <= n) { + res += @intCast(i); + i += 1; + } + return res; + } + ``` + +In `while` loops, since the steps of initializing and updating condition variables are independent of the loop structure, **it has more degrees of freedom than `for` loops**. + +For example, in the following code, the condition variable $i$ is updated twice per round, which is not convenient to implement with a `for` loop. + +=== "Python" + + ```python title="iteration.py" + def while_loop_ii(n: int) -> int: + """while 循环(两次更新)""" + res = 0 + i = 1 # 初始化条件变量 + # 循环求和 1, 4, 10, ... 
+ while i <= n: + res += i + # 更新条件变量 + i += 1 + i *= 2 + return res + ``` + +=== "C++" + + ```cpp title="iteration.cpp" + /* while 循环(两次更新) */ + int whileLoopII(int n) { + int res = 0; + int i = 1; // 初始化条件变量 + // 循环求和 1, 4, 10, ... + while (i <= n) { + res += i; + // 更新条件变量 + i++; + i *= 2; + } + return res; + } + ``` + +=== "Java" + + ```java title="iteration.java" + /* while 循环(两次更新) */ + int whileLoopII(int n) { + int res = 0; + int i = 1; // 初始化条件变量 + // 循环求和 1, 4, 10, ... + while (i <= n) { + res += i; + // 更新条件变量 + i++; + i *= 2; + } + return res; + } + ``` + +=== "C#" + + ```csharp title="iteration.cs" + /* while 循环(两次更新) */ + int WhileLoopII(int n) { + int res = 0; + int i = 1; // 初始化条件变量 + // 循环求和 1, 2, 4, 5... + while (i <= n) { + res += i; + // 更新条件变量 + i += 1; + i *= 2; + } + return res; + } + ``` + +=== "Go" + + ```go title="iteration.go" + /* while 循环(两次更新) */ + func whileLoopII(n int) int { + res := 0 + // 初始化条件变量 + i := 1 + // 循环求和 1, 4, 10, ... + for i <= n { + res += i + // 更新条件变量 + i++ + i *= 2 + } + return res + } + ``` + +=== "Swift" + + ```swift title="iteration.swift" + /* while 循环(两次更新) */ + func whileLoopII(n: Int) -> Int { + var res = 0 + var i = 1 // 初始化条件变量 + // 循环求和 1, 4, 10, ... + while i <= n { + res += i + // 更新条件变量 + i += 1 + i *= 2 + } + return res + } + ``` + +=== "JS" + + ```javascript title="iteration.js" + /* while 循环(两次更新) */ + function whileLoopII(n) { + let res = 0; + let i = 1; // 初始化条件变量 + // 循环求和 1, 4, 10, ... + while (i <= n) { + res += i; + // 更新条件变量 + i++; + i *= 2; + } + return res; + } + ``` + +=== "TS" + + ```typescript title="iteration.ts" + /* while 循环(两次更新) */ + function whileLoopII(n: number): number { + let res = 0; + let i = 1; // 初始化条件变量 + // 循环求和 1, 4, 10, ... + while (i <= n) { + res += i; + // 更新条件变量 + i++; + i *= 2; + } + return res; + } + ``` + +=== "Dart" + + ```dart title="iteration.dart" + /* while 循环(两次更新) */ + int whileLoopII(int n) { + int res = 0; + int i = 1; // 初始化条件变量 + // 循环求和 1, 4, 10, ... 
+ while (i <= n) { + res += i; + // 更新条件变量 + i++; + i *= 2; + } + return res; + } + ``` + +=== "Rust" + + ```rust title="iteration.rs" + /* while 循环(两次更新) */ + fn while_loop_ii(n: i32) -> i32 { + let mut res = 0; + let mut i = 1; // 初始化条件变量 + // 循环求和 1, 4, 10, ... + while i <= n { + res += i; + // 更新条件变量 + i += 1; + i *= 2; + } + res + } + ``` + +=== "C" + + ```c title="iteration.c" + /* while 循环(两次更新) */ + int whileLoopII(int n) { + int res = 0; + int i = 1; // 初始化条件变量 + // 循环求和 1, 4, 10, ... + while (i <= n) { + res += i; + // 更新条件变量 + i++; + i *= 2; + } + return res; + } + ``` + +=== "Zig" + + ```zig title="iteration.zig" + // while 循环(两次更新) + fn whileLoopII(n: i32) i32 { + var res: i32 = 0; + var i: i32 = 1; // 初始化条件变量 + // 循环求和 1, 4, 10, ... + while (i <= n) { + res += @intCast(i); + // 更新条件变量 + i += 1; + i *= 2; + } + return res; + } + ``` + +Overall, **`for` loops have more compact code and `while` loops are more flexible**, and both can implement iteration structures. The choice of which one to use should be based on the needs of the particular problem. + +### 3. 
Nested Loops + +We can nest one loop structure inside another, using the `for` loop as an example: + +=== "Python" + + ```python title="iteration.py" + def nested_for_loop(n: int) -> str: + """双层 for 循环""" + res = "" + # 循环 i = 1, 2, ..., n-1, n + for i in range(1, n + 1): + # 循环 j = 1, 2, ..., n-1, n + for j in range(1, n + 1): + res += f"({i}, {j}), " + return res + ``` + +=== "C++" + + ```cpp title="iteration.cpp" + /* 双层 for 循环 */ + string nestedForLoop(int n) { + ostringstream res; + // 循环 i = 1, 2, ..., n-1, n + for (int i = 1; i <= n; ++i) { + // 循环 j = 1, 2, ..., n-1, n + for (int j = 1; j <= n; ++j) { + res << "(" << i << ", " << j << "), "; + } + } + return res.str(); + } + ``` + +=== "Java" + + ```java title="iteration.java" + /* 双层 for 循环 */ + String nestedForLoop(int n) { + StringBuilder res = new StringBuilder(); + // 循环 i = 1, 2, ..., n-1, n + for (int i = 1; i <= n; i++) { + // 循环 j = 1, 2, ..., n-1, n + for (int j = 1; j <= n; j++) { + res.append("(" + i + ", " + j + "), "); + } + } + return res.toString(); + } + ``` + +=== "C#" + + ```csharp title="iteration.cs" + /* 双层 for 循环 */ + string NestedForLoop(int n) { + StringBuilder res = new(); + // 循环 i = 1, 2, ..., n-1, n + for (int i = 1; i <= n; i++) { + // 循环 j = 1, 2, ..., n-1, n + for (int j = 1; j <= n; j++) { + res.Append($"({i}, {j}), "); + } + } + return res.ToString(); + } + ``` + +=== "Go" + + ```go title="iteration.go" + /* 双层 for 循环 */ + func nestedForLoop(n int) string { + res := "" + // 循环 i = 1, 2, ..., n-1, n + for i := 1; i <= n; i++ { + for j := 1; j <= n; j++ { + // 循环 j = 1, 2, ..., n-1, n + res += fmt.Sprintf("(%d, %d), ", i, j) + } + } + return res + } + ``` + +=== "Swift" + + ```swift title="iteration.swift" + /* 双层 for 循环 */ + func nestedForLoop(n: Int) -> String { + var res = "" + // 循环 i = 1, 2, ..., n-1, n + for i in 1 ... n { + // 循环 j = 1, 2, ..., n-1, n + for j in 1 ... 
n { + res.append("(\(i), \(j)), ") + } + } + return res + } + ``` + +=== "JS" + + ```javascript title="iteration.js" + /* 双层 for 循环 */ + function nestedForLoop(n) { + let res = ''; + // 循环 i = 1, 2, ..., n-1, n + for (let i = 1; i <= n; i++) { + // 循环 j = 1, 2, ..., n-1, n + for (let j = 1; j <= n; j++) { + res += `(${i}, ${j}), `; + } + } + return res; + } + ``` + +=== "TS" + + ```typescript title="iteration.ts" + /* 双层 for 循环 */ + function nestedForLoop(n: number): string { + let res = ''; + // 循环 i = 1, 2, ..., n-1, n + for (let i = 1; i <= n; i++) { + // 循环 j = 1, 2, ..., n-1, n + for (let j = 1; j <= n; j++) { + res += `(${i}, ${j}), `; + } + } + return res; + } + ``` + +=== "Dart" + + ```dart title="iteration.dart" + /* 双层 for 循环 */ + String nestedForLoop(int n) { + String res = ""; + // 循环 i = 1, 2, ..., n-1, n + for (int i = 1; i <= n; i++) { + // 循环 j = 1, 2, ..., n-1, n + for (int j = 1; j <= n; j++) { + res += "($i, $j), "; + } + } + return res; + } + ``` + +=== "Rust" + + ```rust title="iteration.rs" + /* 双层 for 循环 */ + fn nested_for_loop(n: i32) -> String { + let mut res = vec![]; + // 循环 i = 1, 2, ..., n-1, n + for i in 1..=n { + // 循环 j = 1, 2, ..., n-1, n + for j in 1..=n { + res.push(format!("({}, {}), ", i, j)); + } + } + res.join("") + } + ``` + +=== "C" + + ```c title="iteration.c" + /* 双层 for 循环 */ + char *nestedForLoop(int n) { + // n * n 为对应点数量,"(i, j), " 对应字符串长最大为 6+10*2,加上最后一个空字符 \0 的额外空间 + int size = n * n * 26 + 1; + char *res = malloc(size * sizeof(char)); + // 循环 i = 1, 2, ..., n-1, n + for (int i = 1; i <= n; i++) { + // 循环 j = 1, 2, ..., n-1, n + for (int j = 1; j <= n; j++) { + char tmp[26]; + snprintf(tmp, sizeof(tmp), "(%d, %d), ", i, j); + strncat(res, tmp, size - strlen(res) - 1); + } + } + return res; + } + ``` + +=== "Zig" + + ```zig title="iteration.zig" + // 双层 for 循环 + fn nestedForLoop(allocator: Allocator, n: usize) ![]const u8 { + var res = std.ArrayList(u8).init(allocator); + defer res.deinit(); + var buffer: [20]u8 = 
undefined; + // 循环 i = 1, 2, ..., n-1, n + for (1..n+1) |i| { + // 循环 j = 1, 2, ..., n-1, n + for (1..n+1) |j| { + var _str = try std.fmt.bufPrint(&buffer, "({d}, {d}), ", .{i, j}); + try res.appendSlice(_str); + } + } + return res.toOwnedSlice(); + } + ``` + +The Figure 2-2 gives the block diagram of the flow of this nested loop. + +{ class="animation-figure" } + +Figure 2-2 Block diagram of the flow of nested loops
+ +In this case, the number of operations of the function is proportional to $n^2$, meaning the algorithm's running time grows "quadratically" with the size of the input data $n$. + +We can continue to add nested loops, and each level of nesting is a "dimension up", which will increase the time complexity to "cubic relationships", "quartic relationships", and so on. + +## 2.2.2 Recursion + +"Recursion" is an algorithmic strategy to solve a problem by calling the function itself. It consists of two main phases. + +1. **Recurse**: the program calls itself deeper and deeper, usually passing smaller or simpler arguments, until a "termination condition" is reached. +2. **Return**: after the "termination condition" is triggered, the program returns from the deepest level of the recursive function, level by level, aggregating the results of each level. + +And from an implementation point of view, recursive code contains three main elements. + +1. **Termination condition**: used to decide when to switch from "recursing" to "returning". +2. **Recursive call**: corresponds to "recurse", where the function calls itself, usually with smaller or more simplified input parameters. +3. **Return result**: corresponds to "return", returning the result of the current recursion level to the previous one. 
+ +Observe the following code, we only need to call the function `recur(n)` , and the calculation of $1 + 2 + \dots + n$ is done: + +=== "Python" + + ```python title="recursion.py" + def recur(n: int) -> int: + """递归""" + # 终止条件 + if n == 1: + return 1 + # 递:递归调用 + res = recur(n - 1) + # 归:返回结果 + return n + res + ``` + +=== "C++" + + ```cpp title="recursion.cpp" + /* 递归 */ + int recur(int n) { + // 终止条件 + if (n == 1) + return 1; + // 递:递归调用 + int res = recur(n - 1); + // 归:返回结果 + return n + res; + } + ``` + +=== "Java" + + ```java title="recursion.java" + /* 递归 */ + int recur(int n) { + // 终止条件 + if (n == 1) + return 1; + // 递:递归调用 + int res = recur(n - 1); + // 归:返回结果 + return n + res; + } + ``` + +=== "C#" + + ```csharp title="recursion.cs" + /* 递归 */ + int Recur(int n) { + // 终止条件 + if (n == 1) + return 1; + // 递:递归调用 + int res = Recur(n - 1); + // 归:返回结果 + return n + res; + } + ``` + +=== "Go" + + ```go title="recursion.go" + /* 递归 */ + func recur(n int) int { + // 终止条件 + if n == 1 { + return 1 + } + // 递:递归调用 + res := recur(n - 1) + // 归:返回结果 + return n + res + } + ``` + +=== "Swift" + + ```swift title="recursion.swift" + /* 递归 */ + func recur(n: Int) -> Int { + // 终止条件 + if n == 1 { + return 1 + } + // 递:递归调用 + let res = recur(n: n - 1) + // 归:返回结果 + return n + res + } + ``` + +=== "JS" + + ```javascript title="recursion.js" + /* 递归 */ + function recur(n) { + // 终止条件 + if (n === 1) return 1; + // 递:递归调用 + const res = recur(n - 1); + // 归:返回结果 + return n + res; + } + ``` + +=== "TS" + + ```typescript title="recursion.ts" + /* 递归 */ + function recur(n: number): number { + // 终止条件 + if (n === 1) return 1; + // 递:递归调用 + const res = recur(n - 1); + // 归:返回结果 + return n + res; + } + ``` + +=== "Dart" + + ```dart title="recursion.dart" + /* 递归 */ + int recur(int n) { + // 终止条件 + if (n == 1) return 1; + // 递:递归调用 + int res = recur(n - 1); + // 归:返回结果 + return n + res; + } + ``` + +=== "Rust" + + ```rust title="recursion.rs" + /* 递归 */ + fn recur(n: i32) -> i32 { + // 
终止条件 + if n == 1 { + return 1; + } + // 递:递归调用 + let res = recur(n - 1); + // 归:返回结果 + n + res + } + ``` + +=== "C" + + ```c title="recursion.c" + /* 递归 */ + int recur(int n) { + // 终止条件 + if (n == 1) + return 1; + // 递:递归调用 + int res = recur(n - 1); + // 归:返回结果 + return n + res; + } + ``` + +=== "Zig" + + ```zig title="recursion.zig" + // 递归函数 + fn recur(n: i32) i32 { + // 终止条件 + if (n == 1) { + return 1; + } + // 递:递归调用 + var res: i32 = recur(n - 1); + // 归:返回结果 + return n + res; + } + ``` + +The Figure 2-3 shows the recursion of the function. + +{ class="animation-figure" } + +Figure 2-3 Recursion process for the summation function
+ +Although iteration and recursion can yield the same results from a computational point of view, **they represent two completely different paradigms for thinking about and solving problems**. + +- **Iteration**: solving problems "from the bottom up". Start with the most basic steps and repeat or add to them until the task is completed. +- **Recursion**: solving problems "from the top down". The original problem is broken down into smaller subproblems that have the same form as the original problem. Next, the subproblem continues to be broken down into smaller subproblems until it stops at the base case (the solution to the base case is known). + +As an example of the above summation function, set the problem $f(n) = 1 + 2 + \dots + n$ . + +- **Iteration**: the summation process is simulated in a loop, iterating from $1$ to $n$ and executing the summation operation in each round to find $f(n)$. +- **Recursion**: decompose the problem into subproblems $f(n) = n + f(n-1)$ and keep (recursively) decomposing until the base case $f(1) = 1$ terminates. + +### 1. Call The Stack + +Each time a recursion function calls itself, the system allocates memory for the newly opened function to store local variables, call addresses, other information, and so on. This results in two things. + +- The context data for a function is stored in an area of memory called "stack frame space" and is not freed until the function returns. As a result, **recursion is usually more memory-intensive than iteration**. +- Recursion calls to functions incur additional overhead. **Therefore recursion is usually less time efficient than loops**. + +As shown in the Figure 2-4 , before the termination condition is triggered, there are $n$ unreturned recursion functions at the same time, **with a recursion depth of $n$** . + +{ class="animation-figure" } + +Figure 2-4 Recursion call depth
+ +In practice, the depth of recursion allowed by a programming language is usually limited, and excessively deep recursion may result in a stack overflow error. + +### 2. Tail Recursion + +Interestingly, **if a function makes its recursive call only at the last step before returning**, the function can be optimized by the compiler or interpreter to be comparable to iteration in terms of space efficiency. This situation is called "tail recursion". + +- **Ordinary recursion**: when a function returns to a function at a higher level, it needs to continue executing the code, so the system needs to save the context of the previous call. +- **Tail recursion**: the recursive call is the last operation before the function returns, which means that the function does not need to continue with other operations after returning to the previous level, so the system does not need to save the context of the previous function. + +In the case of calculating $1 + 2 + \dots + n$, for example, we can implement tail recursion by setting the result variable `res` as a function parameter. 
+ +=== "Python" + + ```python title="recursion.py" + def tail_recur(n, res): + """尾递归""" + # 终止条件 + if n == 0: + return res + # 尾递归调用 + return tail_recur(n - 1, res + n) + ``` + +=== "C++" + + ```cpp title="recursion.cpp" + /* 尾递归 */ + int tailRecur(int n, int res) { + // 终止条件 + if (n == 0) + return res; + // 尾递归调用 + return tailRecur(n - 1, res + n); + } + ``` + +=== "Java" + + ```java title="recursion.java" + /* 尾递归 */ + int tailRecur(int n, int res) { + // 终止条件 + if (n == 0) + return res; + // 尾递归调用 + return tailRecur(n - 1, res + n); + } + ``` + +=== "C#" + + ```csharp title="recursion.cs" + /* 尾递归 */ + int TailRecur(int n, int res) { + // 终止条件 + if (n == 0) + return res; + // 尾递归调用 + return TailRecur(n - 1, res + n); + } + ``` + +=== "Go" + + ```go title="recursion.go" + /* 尾递归 */ + func tailRecur(n int, res int) int { + // 终止条件 + if n == 0 { + return res + } + // 尾递归调用 + return tailRecur(n-1, res+n) + } + ``` + +=== "Swift" + + ```swift title="recursion.swift" + /* 尾递归 */ + func tailRecur(n: Int, res: Int) -> Int { + // 终止条件 + if n == 0 { + return res + } + // 尾递归调用 + return tailRecur(n: n - 1, res: res + n) + } + ``` + +=== "JS" + + ```javascript title="recursion.js" + /* 尾递归 */ + function tailRecur(n, res) { + // 终止条件 + if (n === 0) return res; + // 尾递归调用 + return tailRecur(n - 1, res + n); + } + ``` + +=== "TS" + + ```typescript title="recursion.ts" + /* 尾递归 */ + function tailRecur(n: number, res: number): number { + // 终止条件 + if (n === 0) return res; + // 尾递归调用 + return tailRecur(n - 1, res + n); + } + ``` + +=== "Dart" + + ```dart title="recursion.dart" + /* 尾递归 */ + int tailRecur(int n, int res) { + // 终止条件 + if (n == 0) return res; + // 尾递归调用 + return tailRecur(n - 1, res + n); + } + ``` + +=== "Rust" + + ```rust title="recursion.rs" + /* 尾递归 */ + fn tail_recur(n: i32, res: i32) -> i32 { + // 终止条件 + if n == 0 { + return res; + } + // 尾递归调用 + tail_recur(n - 1, res + n) + } + ``` + +=== "C" + + ```c title="recursion.c" + /* 尾递归 */ + int tailRecur(int n, 
int res) { + // 终止条件 + if (n == 0) + return res; + // 尾递归调用 + return tailRecur(n - 1, res + n); + } + ``` + +=== "Zig" + + ```zig title="recursion.zig" + // 尾递归函数 + fn tailRecur(n: i32, res: i32) i32 { + // 终止条件 + if (n == 0) { + return res; + } + // 尾递归调用 + return tailRecur(n - 1, res + n); + } + ``` + +The execution of tail recursion is shown in the Figure 2-5 . Comparing normal recursion and tail recursion, the execution point of the summation operation is different. + +- **Ordinary recursion**: the summing operation is performed during the "return" process, and the summing operation is performed again after returning from each level. +- **Tail recursion**: the summing operation is performed in a "recursion" process, the "recursion" process simply returns in levels. + +{ class="animation-figure" } + +Figure 2-5 tail recursion process
+ +!!! tip + + Note that many compilers or interpreters do not support tail recursion optimization. For example, Python does not support tail recursion optimization by default, so even if a function is tail recursive, you may still encounter stack overflow problems. + +### 3. Recursion Tree + +When dealing with algorithmic problems related to divide and conquer, recursion is often more intuitive and easier to read than iteration. Take the Fibonacci sequence as an example. + +!!! question + + Given a Fibonacci series $0, 1, 1, 2, 3, 5, 8, 13, \dots$ , find the $n$th number of the series. + +Let the $n$th number of the Fibonacci series be $f(n)$ , which leads to two easy conclusions. + +- The first two numbers of the series are $f(1) = 0$ and $f(2) = 1$. +- Each number in the series is the sum of the previous two numbers, i.e. $f(n) = f(n - 1) + f(n - 2)$ . + +Recursion code can be written by making recursion calls according to the recursion relationship, using the first two numbers as termination conditions. Call `fib(n)` to get the $n$th number of the Fibonacci series. 
+ +=== "Python" + + ```python title="recursion.py" + def fib(n: int) -> int: + """斐波那契数列:递归""" + # 终止条件 f(1) = 0, f(2) = 1 + if n == 1 or n == 2: + return n - 1 + # 递归调用 f(n) = f(n-1) + f(n-2) + res = fib(n - 1) + fib(n - 2) + # 返回结果 f(n) + return res + ``` + +=== "C++" + + ```cpp title="recursion.cpp" + /* 斐波那契数列:递归 */ + int fib(int n) { + // 终止条件 f(1) = 0, f(2) = 1 + if (n == 1 || n == 2) + return n - 1; + // 递归调用 f(n) = f(n-1) + f(n-2) + int res = fib(n - 1) + fib(n - 2); + // 返回结果 f(n) + return res; + } + ``` + +=== "Java" + + ```java title="recursion.java" + /* 斐波那契数列:递归 */ + int fib(int n) { + // 终止条件 f(1) = 0, f(2) = 1 + if (n == 1 || n == 2) + return n - 1; + // 递归调用 f(n) = f(n-1) + f(n-2) + int res = fib(n - 1) + fib(n - 2); + // 返回结果 f(n) + return res; + } + ``` + +=== "C#" + + ```csharp title="recursion.cs" + /* 斐波那契数列:递归 */ + int Fib(int n) { + // 终止条件 f(1) = 0, f(2) = 1 + if (n == 1 || n == 2) + return n - 1; + // 递归调用 f(n) = f(n-1) + f(n-2) + int res = Fib(n - 1) + Fib(n - 2); + // 返回结果 f(n) + return res; + } + ``` + +=== "Go" + + ```go title="recursion.go" + /* 斐波那契数列:递归 */ + func fib(n int) int { + // 终止条件 f(1) = 0, f(2) = 1 + if n == 1 || n == 2 { + return n - 1 + } + // 递归调用 f(n) = f(n-1) + f(n-2) + res := fib(n-1) + fib(n-2) + // 返回结果 f(n) + return res + } + ``` + +=== "Swift" + + ```swift title="recursion.swift" + /* 斐波那契数列:递归 */ + func fib(n: Int) -> Int { + // 终止条件 f(1) = 0, f(2) = 1 + if n == 1 || n == 2 { + return n - 1 + } + // 递归调用 f(n) = f(n-1) + f(n-2) + let res = fib(n: n - 1) + fib(n: n - 2) + // 返回结果 f(n) + return res + } + ``` + +=== "JS" + + ```javascript title="recursion.js" + /* 斐波那契数列:递归 */ + function fib(n) { + // 终止条件 f(1) = 0, f(2) = 1 + if (n === 1 || n === 2) return n - 1; + // 递归调用 f(n) = f(n-1) + f(n-2) + const res = fib(n - 1) + fib(n - 2); + // 返回结果 f(n) + return res; + } + ``` + +=== "TS" + + ```typescript title="recursion.ts" + /* 斐波那契数列:递归 */ + function fib(n: number): number { + // 终止条件 f(1) = 0, f(2) = 1 + if (n === 
1 || n === 2) return n - 1; + // 递归调用 f(n) = f(n-1) + f(n-2) + const res = fib(n - 1) + fib(n - 2); + // 返回结果 f(n) + return res; + } + ``` + +=== "Dart" + + ```dart title="recursion.dart" + /* 斐波那契数列:递归 */ + int fib(int n) { + // 终止条件 f(1) = 0, f(2) = 1 + if (n == 1 || n == 2) return n - 1; + // 递归调用 f(n) = f(n-1) + f(n-2) + int res = fib(n - 1) + fib(n - 2); + // 返回结果 f(n) + return res; + } + ``` + +=== "Rust" + + ```rust title="recursion.rs" + /* 斐波那契数列:递归 */ + fn fib(n: i32) -> i32 { + // 终止条件 f(1) = 0, f(2) = 1 + if n == 1 || n == 2 { + return n - 1; + } + // 递归调用 f(n) = f(n-1) + f(n-2) + let res = fib(n - 1) + fib(n - 2); + // 返回结果 + res + } + ``` + +=== "C" + + ```c title="recursion.c" + /* 斐波那契数列:递归 */ + int fib(int n) { + // 终止条件 f(1) = 0, f(2) = 1 + if (n == 1 || n == 2) + return n - 1; + // 递归调用 f(n) = f(n-1) + f(n-2) + int res = fib(n - 1) + fib(n - 2); + // 返回结果 f(n) + return res; + } + ``` + +=== "Zig" + + ```zig title="recursion.zig" + // 斐波那契数列 + fn fib(n: i32) i32 { + // 终止条件 f(1) = 0, f(2) = 1 + if (n == 1 or n == 2) { + return n - 1; + } + // 递归调用 f(n) = f(n-1) + f(n-2) + var res: i32 = fib(n - 1) + fib(n - 2); + // 返回结果 f(n) + return res; + } + ``` + +Looking at the above code, we have recursively called two functions within a function, **this means that from one call, two call branches are created**. As shown in the Figure 2-6 , this recursion will result in a recursion tree with the number of levels $n$. + +{ class="animation-figure" } + +Figure 2-6 Recursion tree for Fibonacci series
+ +Essentially, recursion embodies the paradigm of "breaking down a problem into smaller sub-problems", and this divide and conquer strategy is essential. + +- From an algorithmic point of view, many important algorithmic strategies such as searching, sorting algorithm, backtracking, divide and conquer, dynamic programming, etc. directly or indirectly apply this way of thinking. +- From a data structure point of view, recursion is naturally suited to problems related to linked lists, trees and graphs because they are well suited to be analyzed with the idea of partitioning. + +## 2.2.3 Compare The Two + +To summarize the above, as shown in the Table 2-1 , iteration and recursion differ in implementation, performance and applicability. + +Table 2-1 Comparison of iteration and recursion features
+ +Figure 2-15 Associated spaces used by the algorithm
+ +=== "Python" + + ```python title="" + class Node: + """Classes"""" + def __init__(self, x: int): + self.val: int = x # node value + self.next: Node | None = None # reference to the next node + + def function() -> int: + """"Functions""""" + # Perform certain operations... + return 0 + + def algorithm(n) -> int: # input data + A = 0 # temporary data (constant, usually in uppercase) + b = 0 # temporary data (variable) + node = Node(0) # temporary data (object) + c = function() # Stack frame space (call function) + return A + b + c # output data + ``` + +=== "C++" + + ```cpp title="" + /* Structures */ + struct Node { + int val; + Node *next; + Node(int x) : val(x), next(nullptr) {} + }; + + /* Functions */ + int func() { + // Perform certain operations... + return 0; + } + + int algorithm(int n) { // input data + const int a = 0; // temporary data (constant) + int b = 0; // temporary data (variable) + Node* node = new Node(0); // temporary data (object) + int c = func(); // stack frame space (call function) + return a + b + c; // output data + } + ``` + +=== "Java" + + ```java title="" + /* Classes */ + class Node { + int val; + Node next; + Node(int x) { val = x; } + } + + /* Functions */ + int function() { + // Perform certain operations... + return 0; + } + + int algorithm(int n) { // input data + final int a = 0; // temporary data (constant) + int b = 0; // temporary data (variable) + Node node = new Node(0); // temporary data (object) + int c = function(); // stack frame space (call function) + return a + b + c; // output data + } + ``` + +=== "C#" + + ```csharp title="" + /* Classes */ + class Node { + int val; + Node next; + Node(int x) { val = x; } + } + + /* Functions */ + int Function() { + // Perform certain operations... 
+ return 0; + } + + int Algorithm(int n) { // input data + const int a = 0; // temporary data (constant) + int b = 0; // temporary data (variable) + Node node = new(0); // temporary data (object) + int c = Function(); // stack frame space (call function) + return a + b + c; // output data + } + ``` + +=== "Go" + + ```go title="" + /* Structures */ + type node struct { + val int + next *node + } + + /* Create node structure */ + func newNode(val int) *node { + return &node{val: val} + } + + /* Functions */ + func function() int { + // Perform certain operations... + return 0 + } + + func algorithm(n int) int { // input data + const a = 0 // temporary data (constant) + b := 0 // temporary storage of data (variable) + newNode(0) // temporary data (object) + c := function() // stack frame space (call function) + return a + b + c // output data + } + ``` + +=== "Swift" + + ```swift title="" + /* Classes */ + class Node { + var val: Int + var next: Node? + + init(x: Int) { + val = x + } + } + + /* Functions */ + func function() -> Int { + // Perform certain operations... + return 0 + } + + func algorithm(n: Int) -> Int { // input data + let a = 0 // temporary data (constant) + var b = 0 // temporary data (variable) + let node = Node(x: 0) // temporary data (object) + let c = function() // stack frame space (call function) + return a + b + c // output data + } + ``` + +=== "JS" + + ```javascript title="" + /* Classes */ + class Node { + val; + next; + constructor(val) { + this.val = val === undefined ? 
0 : val; // node value + this.next = null; // reference to the next node + } + } + + /* Functions */ + function constFunc() { + // Perform certain operations + return 0; + } + + function algorithm(n) { // input data + const a = 0; // temporary data (constant) + let b = 0; // temporary data (variable) + const node = new Node(0); // temporary data (object) + const c = constFunc(); // Stack frame space (calling function) + return a + b + c; // output data + } + ``` + +=== "TS" + + ```typescript title="" + /* Classes */ + class Node { + val: number; + next: Node | null; + constructor(val?: number) { + this.val = val === undefined ? 0 : val; // node value + this.next = null; // reference to the next node + } + } + + /* Functions */ + function constFunc(): number { + // Perform certain operations + return 0; + } + + function algorithm(n: number): number { // input data + const a = 0; // temporary data (constant) + let b = 0; // temporary data (variable) + const node = new Node(0); // temporary data (object) + const c = constFunc(); // Stack frame space (calling function) + return a + b + c; // output data + } + ``` + +=== "Dart" + + ```dart title="" + /* Classes */ + class Node { + int val; + Node next; + Node(this.val, [this.next]); + } + + /* Functions */ + int function() { + // Perform certain operations... + return 0; + } + + int algorithm(int n) { // input data + const int a = 0; // temporary data (constant) + int b = 0; // temporary data (variable) + Node node = Node(0); // temporary data (object) + int c = function(); // stack frame space (call function) + return a + b + c; // output data + } + ``` + +=== "Rust" + + ```rust title="" + use std::rc::Rc; + use std::cell::RefCell; + + /* Structures */ + struct Node { + val: i32, + next: OptionFigure 2-16 Common space complexity types
+ +### 1. Constant Order $O(1)$ + +Constant order is common for constants, variables, and objects whose quantity is unrelated to the size of the input data $n$. + +It is important to note that memory occupied by initializing a variable or calling a function in a loop is released once the next iteration begins. Therefore, there is no accumulation of occupied space and the space complexity remains $O(1)$ : + +=== "Python" + + ```python title="space_complexity.py" + def function() -> int: + """函数""" + # 执行某些操作 + return 0 + + def constant(n: int): + """常数阶""" + # 常量、变量、对象占用 O(1) 空间 + a = 0 + nums = [0] * 10000 + node = ListNode(0) + # 循环中的变量占用 O(1) 空间 + for _ in range(n): + c = 0 + # 循环中的函数占用 O(1) 空间 + for _ in range(n): + function() + ``` + +=== "C++" + + ```cpp title="space_complexity.cpp" + /* 函数 */ + int func() { + // 执行某些操作 + return 0; + } + + /* 常数阶 */ + void constant(int n) { + // 常量、变量、对象占用 O(1) 空间 + const int a = 0; + int b = 0; + vectorFigure 2-17 Linear order space complexity generated by recursion function
+ +### 3. Quadratic Order $O(N^2)$ + +Quadratic order is common in matrices and graphs, where the number of elements is in a square relationship with $n$: + +=== "Python" + + ```python title="space_complexity.py" + def quadratic(n: int): + """平方阶""" + # 二维列表占用 O(n^2) 空间 + num_matrix = [[0] * n for _ in range(n)] + ``` + +=== "C++" + + ```cpp title="space_complexity.cpp" + /* 平方阶 */ + void quadratic(int n) { + // 二维列表占用 O(n^2) 空间 + vectorFigure 2-18 Square-order space complexity generated by the recursion function
+ +### 4. Exponential Order $O(2^N)$ + +Exponential order is common in binary trees. Looking at the Figure 2-19 , a "full binary tree" of degree $n$ has $2^n - 1$ nodes, occupying $O(2^n)$ space: + +=== "Python" + + ```python title="space_complexity.py" + def build_tree(n: int) -> TreeNode | None: + """指数阶(建立满二叉树)""" + if n == 0: + return None + root = TreeNode(0) + root.left = build_tree(n - 1) + root.right = build_tree(n - 1) + return root + ``` + +=== "C++" + + ```cpp title="space_complexity.cpp" + /* 指数阶(建立满二叉树) */ + TreeNode *buildTree(int n) { + if (n == 0) + return nullptr; + TreeNode *root = new TreeNode(0); + root->left = buildTree(n - 1); + root->right = buildTree(n - 1); + return root; + } + ``` + +=== "Java" + + ```java title="space_complexity.java" + /* 指数阶(建立满二叉树) */ + TreeNode buildTree(int n) { + if (n == 0) + return null; + TreeNode root = new TreeNode(0); + root.left = buildTree(n - 1); + root.right = buildTree(n - 1); + return root; + } + ``` + +=== "C#" + + ```csharp title="space_complexity.cs" + /* 指数阶(建立满二叉树) */ + TreeNode? BuildTree(int n) { + if (n == 0) return null; + TreeNode root = new(0) { + left = BuildTree(n - 1), + right = BuildTree(n - 1) + }; + return root; + } + ``` + +=== "Go" + + ```go title="space_complexity.go" + /* 指数阶(建立满二叉树) */ + func buildTree(n int) *treeNode { + if n == 0 { + return nil + } + root := newTreeNode(0) + root.left = buildTree(n - 1) + root.right = buildTree(n - 1) + return root + } + ``` + +=== "Swift" + + ```swift title="space_complexity.swift" + /* 指数阶(建立满二叉树) */ + func buildTree(n: Int) -> TreeNode? 
{ + if n == 0 { + return nil + } + let root = TreeNode(x: 0) + root.left = buildTree(n: n - 1) + root.right = buildTree(n: n - 1) + return root + } + ``` + +=== "JS" + + ```javascript title="space_complexity.js" + /* 指数阶(建立满二叉树) */ + function buildTree(n) { + if (n === 0) return null; + const root = new TreeNode(0); + root.left = buildTree(n - 1); + root.right = buildTree(n - 1); + return root; + } + ``` + +=== "TS" + + ```typescript title="space_complexity.ts" + /* 指数阶(建立满二叉树) */ + function buildTree(n: number): TreeNode | null { + if (n === 0) return null; + const root = new TreeNode(0); + root.left = buildTree(n - 1); + root.right = buildTree(n - 1); + return root; + } + ``` + +=== "Dart" + + ```dart title="space_complexity.dart" + /* 指数阶(建立满二叉树) */ + TreeNode? buildTree(int n) { + if (n == 0) return null; + TreeNode root = TreeNode(0); + root.left = buildTree(n - 1); + root.right = buildTree(n - 1); + return root; + } + ``` + +=== "Rust" + + ```rust title="space_complexity.rs" + /* 指数阶(建立满二叉树) */ + fn build_tree(n: i32) -> OptionFigure 2-19 Exponential order space complexity generated by a full binary tree
+ +### 5. Logarithmic Order $O(\log n)$ + +Logarithmic order is commonly used in divide and conquer algorithms. For example, in a merge sort, given an array of length $n$ as the input, each round of recursion divides the array in half from its midpoint to form a recursion tree of height $\log n$, using $O(\log n)$ stack frame space. + +Another example is to convert a number into a string. Given a positive integer $n$ with a digit count of $\log_{10} n + 1$, the corresponding string length is $\log_{10} n + 1$. Therefore, the space complexity is $O(\log_{10} n + 1) = O(\log n)$. + +## 2.4.4 Weighing Time And Space + +Ideally, we would like to optimize both the time complexity and the space complexity of an algorithm. However, in reality, simultaneously optimizing time and space complexity is often challenging. + +**Reducing time complexity usually comes at the expense of increasing space complexity, and vice versa**. The approach of sacrificing memory space to improve algorithm speed is known as "trading space for time", while the opposite is called "trading time for space". + +The choice between these approaches depends on which aspect we prioritize. In most cases, time is more valuable than space, so "trading space for time" is usually the more common strategy. Of course, in situations with large data volumes, controlling space complexity is also crucial. diff --git a/docs-en/chapter_computational_complexity/summary.md b/docs-en/chapter_computational_complexity/summary.md new file mode 100644 index 000000000..e34b247da --- /dev/null +++ b/docs-en/chapter_computational_complexity/summary.md @@ -0,0 +1,53 @@ +--- +comments: true +--- + +# 2.5 Summary + +### 1. Highlights + +**Evaluation of Algorithm Efficiency** + +- Time and space efficiency are the two leading evaluation indicators to measure an algorithm. +- We can evaluate the efficiency of an algorithm through real-world testing. 
Still, it isn't easy to eliminate the side effects from the testing environment, and it consumes a lot of computational resources. +- Complexity analysis overcomes the drawbacks of real-world testing. The analysis results can apply to all operating platforms and reveal the algorithm's efficiency under various data scales. + +**Time Complexity** + +- Time complexity is used to measure the trend of algorithm running time as the data size grows, which can effectively evaluate the algorithm's efficiency. However, it may fail in some cases, such as when the input volume is small or the time complexities are similar, making it difficult to precisely compare the efficiency of algorithms. +- The worst-case time complexity is denoted by big $O$ notation, which corresponds to the asymptotic upper bound of the function, reflecting the growth rate in the number of operations $T(n)$ as $n$ tends to positive infinity. +- The estimation of time complexity involves two steps: first, counting the number of operations, and then determining the asymptotic upper bound. +- Common time complexities, from lowest to highest, are $O(1)$, $O(\log n)$, $O(n)$, $O(n \log n)$, $O(n^2)$, $O(2^n)$, and $O(n!)$. +- The time complexity of certain algorithms is not fixed and depends on the distribution of the input data. The time complexity can be categorized into worst-case, best-case, and average-case. The best-case time complexity is rarely used because the input data must meet strict conditions to achieve it. +- The average time complexity reflects the efficiency of an algorithm with random data inputs, which is closest to the performance of algorithms in real-world scenarios. Calculating the average time complexity requires statistical analysis of the input data and a derived mathematical expectation. + +**Space Complexity** + +- Space complexity serves a similar purpose to time complexity and is used to measure the trend of space occupied by an algorithm as the data volume increases. 
+- The memory space associated with the operation of an algorithm can be categorized into input space, temporary space, and output space. Normally, the input space is not considered when determining space complexity. The temporary space can be classified into instruction space, data space, and stack frame space, and the stack frame space usually only affects the space complexity for recursion functions. +- We mainly focus on the worst-case space complexity, which refers to the measurement of an algorithm's space usage when given the worst-case input data and during the worst-case execution scenario. +- Common space complexities are $O(1)$, $O(\log n)$, $O(n)$, $O(n^2)$ and $O(2^n)$ from lowest to highest. + +### 2. Q & A + +!!! question "Is the space complexity of tail recursion $O(1)$?" + + Theoretically, the space complexity of a tail recursion function can be optimized to $O(1)$. However, most programming languages (e.g., Java, Python, C++, Go, C#, etc.) do not support auto-optimization for tail recursion, so the space complexity is usually considered as $O(n)$. + +!!! question "What is the difference between the terms function and method?" + + A *function* can be executed independently, and all arguments are passed explicitly. A *method* is associated with an object and is implicitly passed to the object that calls it, allowing it to operate on the data contained within an instance of a class. + + Let's illustrate with a few common programming languages. + + - C is a procedural programming language without object-oriented concepts, so it has only functions. However, we can simulate object-oriented programming by creating structures (struct), and the functions associated with structures are equivalent to methods in other languages. + - Java and C# are object-oriented programming languages, and blocks of code (methods) are typically part of a class. 
Static methods behave like functions because they are bound to the class and cannot access specific instance variables. +- Both C++ and Python support both procedural programming (functions) and object-oriented programming (methods). + +!!! question "Does the figure "Common Types of Space Complexity" reflect the absolute size of the occupied space?" + + No, that figure shows the space complexity, which reflects the growth trend, not the absolute size of the space occupied. + + For example, if you take $n = 8$ , the values of each curve do not align with the function because each curve contains a constant term used to compress the range of values to a visually comfortable range. + + In practice, since we usually don't know each method's "constant term" complexity, it is generally impossible to choose the optimal solution for $n = 8$ based on complexity alone. But it's easier to choose for $n = 8^5$ as the growth trend is already dominant. diff --git a/docs-en/chapter_computational_complexity/time_complexity.md b/docs-en/chapter_computational_complexity/time_complexity.md new file mode 100644 index 000000000..4a5490410 --- /dev/null +++ b/docs-en/chapter_computational_complexity/time_complexity.md @@ -0,0 +1,3369 @@ +--- +comments: true +--- + +# 2.3 Time Complexity + +Runtime can be an intuitive and accurate reflection of the efficiency of an algorithm. What should we do if we want to accurately predict the runtime of a piece of code? + +1. **Determine the running platform**, including hardware configuration, programming language, system environment, etc., all of which affect the efficiency of the code. +2. **Evaluate the running time** required for various computational operations, e.g., the addition operation `+` takes 1 ns, the multiplication operation `*` takes 10 ns, the print operation `print()` takes 5 ns, and so on. +3. **Count all the computational operations in the code** and sum the execution times of all the operations to get the runtime. 
+ +For example, in the following code, the input data size is $n$ : + +=== "Python" + + ```python title="" + # Under an operating platform + def algorithm(n: int): + a = 2 # 1 ns + a = a + 1 # 1 ns + a = a * 2 # 10 ns + # Cycle n times + for _ in range(n): # 1 ns + print(0) # 5 ns + ``` + +=== "C++" + + ```cpp title="" + // Under a particular operating platform + void algorithm(int n) { + int a = 2; // 1 ns + a = a + 1; // 1 ns + a = a * 2; // 10 ns + // Loop n times + for (int i = 0; i < n; i++) { // 1 ns , every round i++ is executed + cout << 0 << endl; // 5 ns + } + } + ``` + +=== "Java" + + ```java title="" + // Under a particular operating platform + void algorithm(int n) { + int a = 2; // 1 ns + a = a + 1; // 1 ns + a = a * 2; // 10 ns + // Loop n times + for (int i = 0; i < n; i++) { // 1 ns , every round i++ is executed + System.out.println(0); // 5 ns + } + } + ``` + +=== "C#" + + ```csharp title="" + // Under a particular operating platform + void Algorithm(int n) { + int a = 2; // 1 ns + a = a + 1; // 1 ns + a = a * 2; // 10 ns + // Loop n times + for (int i = 0; i < n; i++) { // 1 ns , every round i++ is executed + Console.WriteLine(0); // 5 ns + } + } + ``` + +=== "Go" + + ```go title="" + // Under a particular operating platform + func algorithm(n int) { + a := 2 // 1 ns + a = a + 1 // 1 ns + a = a * 2 // 10 ns + // Loop n times + for i := 0; i < n; i++ { // 1 ns + fmt.Println(a) // 5 ns + } + } + ``` + +=== "Swift" + + ```swift title="" + // Under a particular operating platform + func algorithm(n: Int) { + var a = 2 // 1 ns + a = a + 1 // 1 ns + a = a * 2 // 10 ns + // Loop n times + for _ in 0 ..< n { // 1 ns + print(0) // 5 ns + } + } + ``` + +=== "JS" + + ```javascript title="" + // Under a particular operating platform + function algorithm(n) { + var a = 2; // 1 ns + a = a + 1; // 1 ns + a = a * 2; // 10 ns + // Loop n times + for(let i = 0; i < n; i++) { // 1 ns , every round i++ is executed + console.log(0); // 5 ns + } + } + ``` + +=== "TS" 
+ + ```typescript title="" + // Under a particular operating platform + function algorithm(n: number): void { + var a: number = 2; // 1 ns + a = a + 1; // 1 ns + a = a * 2; // 10 ns + // Loop n times + for(let i = 0; i < n; i++) { // 1 ns , every round i++ is executed + console.log(0); // 5 ns + } + } + ``` + +=== "Dart" + + ```dart title="" + // Under a particular operating platform + void algorithm(int n) { + int a = 2; // 1 ns + a = a + 1; // 1 ns + a = a * 2; // 10 ns + // Loop n times + for (int i = 0; i < n; i++) { // 1 ns , every round i++ is executed + print(0); // 5 ns + } + } + ``` + +=== "Rust" + + ```rust title="" + // Under a particular operating platform + fn algorithm(n: i32) { + let mut a = 2; // 1 ns + a = a + 1; // 1 ns + a = a * 2; // 10 ns + // Loop n times + for _ in 0..n { // 1 ns for each round i++ + println!("{}", 0); // 5 ns + } + } + ``` + +=== "C" + + ```c title="" + // Under a particular operating platform + void algorithm(int n) { + int a = 2; // 1 ns + a = a + 1; // 1 ns + a = a * 2; // 10 ns + // Loop n times + for (int i = 0; i < n; i++) { // 1 ns , every round i++ is executed + printf("%d", 0); // 5 ns + } + } + ``` + +=== "Zig" + + ```zig title="" + // Under a particular operating platform + fn algorithm(n: usize) void { + var a: i32 = 2; // 1 ns + a += 1; // 1 ns + a *= 2; // 10 ns + // Loop n times + for (0..n) |_| { // 1 ns + std.debug.print("{}\n", .{0}); // 5 ns + } + } + ``` + +Based on the above method, the algorithm running time can be obtained as $6n + 12$ ns : + +$$ +1 + 1 + 10 + (1 + 5) \times n = 6n + 12 +$$ + +In practice, however, **statistical algorithm runtimes are neither reasonable nor realistic**. First, we do not want to tie the estimation time to the operation platform, because the algorithm needs to run on a variety of different platforms. Second, it is difficult for us to be informed of the runtime of each operation, which makes the prediction process extremely difficult. 
+ +## 2.3.1 Trends In Statistical Time Growth + +The time complexity analysis counts not the algorithm running time, **but the tendency of the algorithm running time to increase as the amount of data gets larger**. + +The concept of "time-growing trend" is rather abstract, so let's try to understand it through an example. Suppose the size of the input data is $n$, and given three algorithmic functions `A`, `B` and `C`: + +=== "Python" + + ```python title="" + # Time complexity of algorithm A: constant order + def algorithm_A(n: int): + print(0) + # Time complexity of algorithm B: linear order + def algorithm_B(n: int): + for _ in range(n): + print(0) + # Time complexity of algorithm C: constant order + def algorithm_C(n: int): + for _ in range(1000000): + print(0) + ``` + +=== "C++" + + ```cpp title="" + // Time complexity of algorithm A: constant order + void algorithm_A(int n) { + cout << 0 << endl; + } + // Time complexity of algorithm B: linear order + void algorithm_B(int n) { + for (int i = 0; i < n; i++) { + cout << 0 << endl; + } + } + // Time complexity of algorithm C: constant order + void algorithm_C(int n) { + for (int i = 0; i < 1000000; i++) { + cout << 0 << endl; + } + } + ``` + +=== "Java" + + ```java title="" + // Time complexity of algorithm A: constant order + void algorithm_A(int n) { + System.out.println(0); + } + // Time complexity of algorithm B: linear order + void algorithm_B(int n) { + for (int i = 0; i < n; i++) { + System.out.println(0); + } + } + // Time complexity of algorithm C: constant order + void algorithm_C(int n) { + for (int i = 0; i < 1000000; i++) { + System.out.println(0); + } + } + ``` + +=== "C#" + + ```csharp title="" + // Time complexity of algorithm A: constant order + void AlgorithmA(int n) { + Console.WriteLine(0); + } + // Time complexity of algorithm B: linear order + void AlgorithmB(int n) { + for (int i = 0; i < n; i++) { + Console.WriteLine(0); + } + } + // Time complexity of algorithm C: constant order + void 
AlgorithmC(int n) { + for (int i = 0; i < 1000000; i++) { + Console.WriteLine(0); + } + } + ``` + +=== "Go" + + ```go title="" + // Time complexity of algorithm A: constant order + func algorithm_A(n int) { + fmt.Println(0) + } + // Time complexity of algorithm B: linear order + func algorithm_B(n int) { + for i := 0; i < n; i++ { + fmt.Println(0) + } + } + // Time complexity of algorithm C: constant order + func algorithm_C(n int) { + for i := 0; i < 1000000; i++ { + fmt.Println(0) + } + } + ``` + +=== "Swift" + + ```swift title="" + // Time complexity of algorithm A: constant order + func algorithmA(n: Int) { + print(0) + } + + // Time complexity of algorithm B: linear order + func algorithmB(n: Int) { + for _ in 0 ..< n { + print(0) + } + } + + // Time complexity of algorithm C: constant order + func algorithmC(n: Int) { + for _ in 0 ..< 1000000 { + print(0) + } + } + ``` + +=== "JS" + + ```javascript title="" + // Time complexity of algorithm A: constant order + function algorithm_A(n) { + console.log(0); + } + // Time complexity of algorithm B: linear order + function algorithm_B(n) { + for (let i = 0; i < n; i++) { + console.log(0); + } + } + // Time complexity of algorithm C: constant order + function algorithm_C(n) { + for (let i = 0; i < 1000000; i++) { + console.log(0); + } + } + + ``` + +=== "TS" + + ```typescript title="" + // Time complexity of algorithm A: constant order + function algorithm_A(n: number): void { + console.log(0); + } + // Time complexity of algorithm B: linear order + function algorithm_B(n: number): void { + for (let i = 0; i < n; i++) { + console.log(0); + } + } + // Time complexity of algorithm C: constant order + function algorithm_C(n: number): void { + for (let i = 0; i < 1000000; i++) { + console.log(0); + } + } + ``` + +=== "Dart" + + ```dart title="" + // Time complexity of algorithm A: constant order + void algorithmA(int n) { + print(0); + } + // Time complexity of algorithm B: linear order + void algorithmB(int n) { + for 
(int i = 0; i < n; i++) { + print(0); + } + } + // Time complexity of algorithm C: constant order + void algorithmC(int n) { + for (int i = 0; i < 1000000; i++) { + print(0); + } + } + ``` + +=== "Rust" + + ```rust title="" + // Time complexity of algorithm A: constant order + fn algorithm_A(n: i32) { + println!("{}", 0); + } + // Time complexity of algorithm B: linear order + fn algorithm_B(n: i32) { + for _ in 0..n { + println!("{}", 0); + } + } + // Time complexity of algorithm C: constant order + fn algorithm_C(n: i32) { + for _ in 0..1000000 { + println!("{}", 0); + } + } + ``` + +=== "C" + + ```c title="" + // Time complexity of algorithm A: constant order + void algorithm_A(int n) { + printf("%d", 0); + } + // Time complexity of algorithm B: linear order + void algorithm_B(int n) { + for (int i = 0; i < n; i++) { + printf("%d", 0); + } + } + // Time complexity of algorithm C: constant order + void algorithm_C(int n) { + for (int i = 0; i < 1000000; i++) { + printf("%d", 0); + } + } + ``` + +=== "Zig" + + ```zig title="" + // Time complexity of algorithm A: constant order + fn algorithm_A(n: usize) void { + _ = n; + std.debug.print("{}\n", .{0}); + } + // Time complexity of algorithm B: linear order + fn algorithm_B(n: i32) void { + for (0..n) |_| { + std.debug.print("{}\n", .{0}); + } + } + // Time complexity of algorithm C: constant order + fn algorithm_C(n: i32) void { + _ = n; + for (0..1000000) |_| { + std.debug.print("{}\n", .{0}); + } + } + ``` + +The Figure 2-7 shows the time complexity of the above three algorithmic functions. + +- Algorithm `A` has only $1$ print operations, and the running time of the algorithm does not increase with $n$. We call the time complexity of this algorithm "constant order". +- The print operation in algorithm `B` requires $n$ cycles, and the running time of the algorithm increases linearly with $n$. The time complexity of this algorithm is called "linear order". 
+- The print operation in algorithm `C` requires $1000000$ loops, which is a long runtime, but it is independent of the size of the input data $n$. Therefore, the time complexity of `C` is the same as that of `A`, which is still of "constant order". + +{ class="animation-figure" } + +Figure 2-7 Time growth trends for algorithms A, B and C
+ +What are the characteristics of time complexity analysis compared to directly measuring the algorithm's running time? + +- The **time complexity can effectively evaluate the efficiency of an algorithm**. For example, the running time of algorithm `B` increases linearly and is slower than algorithm `A` for $n > 1$ and slower than algorithm `C` for $n > 1,000,000$. In fact, as long as the input data size $n$ is large enough, algorithms with "constant order" of complexity will always outperform algorithms with "linear order", which is exactly what the time complexity trend means. +- The **method of estimating time complexity is simpler**. Obviously, neither the running platform nor the type of computational operation is related to the growth trend of the running time of the algorithm. Therefore, in time complexity analysis, we can simply treat the execution time of all computational operations as the same "unit time", thus simplifying the "statistics of the running time of computational operations" to the "statistics of the number of computational operations", which greatly reduces the difficulty of estimation. +- There are also some limitations of **time complexity**. For example, although algorithms `A` and `C` have the same time complexity, the actual running time varies greatly. Similarly, although the time complexity of algorithm `B` is higher than that of `C` , algorithm `B` significantly outperforms algorithm `C` when the size of the input data $n$ is small. In these cases, it is difficult to judge the efficiency of an algorithm based on time complexity alone. Of course, despite the above problems, complexity analysis is still the most effective and commonly used method to judge the efficiency of algorithms. 
+ +## 2.3.2 Functions Asymptotic Upper Bounds + +Given a function with input size $n$: + +=== "Python" + + ```python title="" + def algorithm(n: int): + a = 1 # +1 + a = a + 1 # +1 + a = a * 2 # +1 + # Cycle n times + for i in range(n): # +1 + print(0) # +1 + ``` + +=== "C++" + + ```cpp title="" + void algorithm(int n) { + int a = 1; // +1 + a = a + 1; // +1 + a = a * 2; // +1 + // Loop n times + for (int i = 0; i < n; i++) { // +1 (execute i ++ every round) + cout << 0 << endl; // +1 + } + } + ``` + +=== "Java" + + ```java title="" + void algorithm(int n) { + int a = 1; // +1 + a = a + 1; // +1 + a = a * 2; // +1 + // Loop n times + for (int i = 0; i < n; i++) { // +1 (execute i ++ every round) + System.out.println(0); // +1 + } + } + ``` + +=== "C#" + + ```csharp title="" + void Algorithm(int n) { + int a = 1; // +1 + a = a + 1; // +1 + a = a * 2; // +1 + // Loop n times + for (int i = 0; i < n; i++) { // +1 (execute i ++ every round) + Console.WriteLine(0); // +1 + } + } + ``` + +=== "Go" + + ```go title="" + func algorithm(n int) { + a := 1 // +1 + a = a + 1 // +1 + a = a * 2 // +1 + // Loop n times + for i := 0; i < n; i++ { // +1 + fmt.Println(a) // +1 + } + } + ``` + +=== "Swift" + + ```swift title="" + func algorithm(n: Int) { + var a = 1 // +1 + a = a + 1 // +1 + a = a * 2 // +1 + // Loop n times + for _ in 0 ..< n { // +1 + print(0) // +1 + } + } + ``` + +=== "JS" + + ```javascript title="" + function algorithm(n) { + var a = 1; // +1 + a += 1; // +1 + a *= 2; // +1 + // Loop n times + for(let i = 0; i < n; i++){ // +1 (execute i ++ every round) + console.log(0); // +1 + } + } + ``` + +=== "TS" + + ```typescript title="" + function algorithm(n: number): void{ + var a: number = 1; // +1 + a += 1; // +1 + a *= 2; // +1 + // Loop n times + for(let i = 0; i < n; i++){ // +1 (execute i ++ every round) + console.log(0); // +1 + } + } + ``` + +=== "Dart" + + ```dart title="" + void algorithm(int n) { + int a = 1; // +1 + a = a + 1; // +1 + a = a * 2; // +1 + // 
Loop n times + for (int i = 0; i < n; i++) { // +1 (execute i ++ every round) + print(0); // +1 + } + } + ``` + +=== "Rust" + + ```rust title="" + fn algorithm(n: i32) { + let mut a = 1; // +1 + a = a + 1; // +1 + a = a * 2; // +1 + + // Loop n times + for _ in 0..n { // +1 (execute i ++ every round) + println!("{}", 0); // +1 + } + } + ``` + +=== "C" + + ```c title="" + void algorithm(int n) { + int a = 1; // +1 + a = a + 1; // +1 + a = a * 2; // +1 + // Loop n times + for (int i = 0; i < n; i++) { // +1 (execute i ++ every round) + printf("%d", 0); // +1 + } + } + ``` + +=== "Zig" + + ```zig title="" + fn algorithm(n: usize) void { + var a: i32 = 1; // +1 + a += 1; // +1 + a *= 2; // +1 + // Loop n times + for (0..n) |_| { // +1 (execute i ++ every round) + std.debug.print("{}\n", .{0}); // +1 + } + } + ``` + +Let the number of operations of the algorithm be a function of the size of the input data $n$, denoted as $T(n)$ , then the number of operations of the above function is: + +$$ +T(n) = 3 + 2n +$$ + +$T(n)$ is a primary function, which indicates that the trend of its running time growth is linear, and thus its time complexity is of linear order. + +We denote the time complexity of the linear order as $O(n)$ , and this mathematical notation is called the "big $O$ notation big-$O$ notation", which denotes the "asymptotic upper bound" of the function $T(n)$. + +Time complexity analysis is essentially the computation of asymptotic upper bounds on the "number of operations function $T(n)$", which has a clear mathematical definition. + +!!! abstract "Function asymptotic upper bound" + + If there exists a positive real number $c$ and a real number $n_0$ such that $T(n) \leq c \cdot f(n)$ for all $n > n_0$ , then it can be argued that $f(n)$ gives an asymptotic upper bound on $T(n)$ , denoted as $T(n) = O(f(n))$ . 
+ +As shown in Figure 2-8, computing the asymptotic upper bound is a matter of finding a function $f(n)$ such that $T(n)$ and $f(n)$ are at the same growth level as $n$ tends to infinity, differing only by a constant factor $c$. + +{ class="animation-figure" } + +Figure 2-8 Asymptotic upper bound of a function
+ +## 2.3.3 Method Of Projection + +Asymptotic upper bounds are a bit heavy on math, so don't worry if you don't fully understand them at first. In practice, we only need to master the projection method, and the mathematical meaning can be gradually comprehended. + +By definition, after determining $f(n)$, we can get the time complexity $O(f(n))$. So how do we determine the asymptotic upper bound $f(n)$? The process is divided into two steps: first count the number of operations, and then determine the asymptotic upper bound. + +### 1. The First Step: Counting The Number Of Operations + +For the code, it is sufficient to calculate from top to bottom, line by line. However, since the constant $c$ in $c \cdot f(n)$ can take any value, **the various coefficients and constant terms in the number of operations $T(n)$ can be ignored**. Based on this principle, the following counting simplification techniques can be summarized. + +1. **Ignore the constant terms in $T(n)$**. Since none of them are related to $n$, they have no effect on the time complexity. +2. **Omit all coefficients**. For example, looping $2n$ times, $5n + 1$ times, etc., can be simplified and notated as $n$ times because the coefficients before $n$ have no effect on the time complexity. +3. **Use multiplication** when loops are nested. The total number of operations is equal to the product of the numbers of operations of the outer and inner loops, and the techniques in points `1.` and `2.` can still be applied to each level of the loop. + +Given a function, we can use the above techniques to count the number of operations. 
+ +=== "Python" + + ```python title="" + def algorithm(n: int): + a = 1 # +0 (trick 1) + a = a + n # +0 (trick 1) + # +n (technique 2) + for i in range(5 * n + 1): + print(0) + # +n*n (technique 3) + for i in range(2 * n): + for j in range(n + 1): + print(0) + ``` + +=== "C++" + + ```cpp title="" + void algorithm(int n) { + int a = 1; // +0 (trick 1) + a = a + n; // +0 (trick 1) + // +n (technique 2) + for (int i = 0; i < 5 * n + 1; i++) { + cout << 0 << endl; + } + // +n*n (technique 3) + for (int i = 0; i < 2 * n; i++) { + for (int j = 0; j < n + 1; j++) { + cout << 0 << endl; + } + } + } + ``` + +=== "Java" + + ```java title="" + void algorithm(int n) { + int a = 1; // +0 (trick 1) + a = a + n; // +0 (trick 1) + // +n (technique 2) + for (int i = 0; i < 5 * n + 1; i++) { + System.out.println(0); + } + // +n*n (technique 3) + for (int i = 0; i < 2 * n; i++) { + for (int j = 0; j < n + 1; j++) { + System.out.println(0); + } + } + } + ``` + +=== "C#" + + ```csharp title="" + void Algorithm(int n) { + int a = 1; // +0 (trick 1) + a = a + n; // +0 (trick 1) + // +n (technique 2) + for (int i = 0; i < 5 * n + 1; i++) { + Console.WriteLine(0); + } + // +n*n (technique 3) + for (int i = 0; i < 2 * n; i++) { + for (int j = 0; j < n + 1; j++) { + Console.WriteLine(0); + } + } + } + ``` + +=== "Go" + + ```go title="" + func algorithm(n int) { + a := 1 // +0 (trick 1) + a = a + n // +0 (trick 1) + // +n (technique 2) + for i := 0; i < 5 * n + 1; i++ { + fmt.Println(0) + } + // +n*n (technique 3) + for i := 0; i < 2 * n; i++ { + for j := 0; j < n + 1; j++ { + fmt.Println(0) + } + } + } + ``` + +=== "Swift" + + ```swift title="" + func algorithm(n: Int) { + var a = 1 // +0 (trick 1) + a = a + n // +0 (trick 1) + // +n (technique 2) + for _ in 0 ..< (5 * n + 1) { + print(0) + } + // +n*n (technique 3) + for _ in 0 ..< (2 * n) { + for _ in 0 ..< (n + 1) { + print(0) + } + } + } + ``` + +=== "JS" + + ```javascript title="" + function algorithm(n) { + let a = 1; // +0 (trick 1) + 
a = a + n; // +0 (trick 1) + // +n (technique 2) + for (let i = 0; i < 5 * n + 1; i++) { + console.log(0); + } + // +n*n (technique 3) + for (let i = 0; i < 2 * n; i++) { + for (let j = 0; j < n + 1; j++) { + console.log(0); + } + } + } + ``` + +=== "TS" + + ```typescript title="" + function algorithm(n: number): void { + let a = 1; // +0 (trick 1) + a = a + n; // +0 (trick 1) + // +n (technique 2) + for (let i = 0; i < 5 * n + 1; i++) { + console.log(0); + } + // +n*n (technique 3) + for (let i = 0; i < 2 * n; i++) { + for (let j = 0; j < n + 1; j++) { + console.log(0); + } + } + } + ``` + +=== "Dart" + + ```dart title="" + void algorithm(int n) { + int a = 1; // +0 (trick 1) + a = a + n; // +0 (trick 1) + // +n (technique 2) + for (int i = 0; i < 5 * n + 1; i++) { + print(0); + } + // +n*n (technique 3) + for (int i = 0; i < 2 * n; i++) { + for (int j = 0; j < n + 1; j++) { + print(0); + } + } + } + ``` + +=== "Rust" + + ```rust title="" + fn algorithm(n: i32) { + let mut a = 1; // +0 (trick 1) + a = a + n; // +0 (trick 1) + + // +n (technique 2) + for i in 0..(5 * n + 1) { + println!("{}", 0); + } + + // +n*n (technique 3) + for i in 0..(2 * n) { + for j in 0..(n + 1) { + println!("{}", 0); + } + } + } + ``` + +=== "C" + + ```c title="" + void algorithm(int n) { + int a = 1; // +0 (trick 1) + a = a + n; // +0 (trick 1) + // +n (technique 2) + for (int i = 0; i < 5 * n + 1; i++) { + printf("%d", 0); + } + // +n*n (technique 3) + for (int i = 0; i < 2 * n; i++) { + for (int j = 0; j < n + 1; j++) { + printf("%d", 0); + } + } + } + ``` + +=== "Zig" + + ```zig title="" + fn algorithm(n: usize) void { + var a: i32 = 1; // +0 (trick 1) + a = a + @as(i32, @intCast(n)); // +0 (trick 1) + + // +n (technique 2) + for(0..(5 * n + 1)) |_| { + std.debug.print("{}\n", .{0}); + } + + // +n*n (technique 3) + for(0..(2 * n)) |_| { + for(0..(n + 1)) |_| { + std.debug.print("{}\n", .{0}); + } + } + } + ``` + +The following equations show the statistical results before and after 
using the above techniques, both of which yield a time complexity of $O(n^2)$.
+ +Figure 2-9 Common time complexity types
+ +### 1. Constant Order $O(1)$ + +The number of operations of the constant order is independent of the input data size $n$, i.e., it does not change with $n$. + +In the following function, although the number of operations `size` may be large, the time complexity is still $O(1)$ because it is independent of the input data size $n$ : + +=== "Python" + + ```python title="time_complexity.py" + def constant(n: int) -> int: + """常数阶""" + count = 0 + size = 100000 + for _ in range(size): + count += 1 + return count + ``` + +=== "C++" + + ```cpp title="time_complexity.cpp" + /* 常数阶 */ + int constant(int n) { + int count = 0; + int size = 100000; + for (int i = 0; i < size; i++) + count++; + return count; + } + ``` + +=== "Java" + + ```java title="time_complexity.java" + /* 常数阶 */ + int constant(int n) { + int count = 0; + int size = 100000; + for (int i = 0; i < size; i++) + count++; + return count; + } + ``` + +=== "C#" + + ```csharp title="time_complexity.cs" + /* 常数阶 */ + int Constant(int n) { + int count = 0; + int size = 100000; + for (int i = 0; i < size; i++) + count++; + return count; + } + ``` + +=== "Go" + + ```go title="time_complexity.go" + /* 常数阶 */ + func constant(n int) int { + count := 0 + size := 100000 + for i := 0; i < size; i++ { + count++ + } + return count + } + ``` + +=== "Swift" + + ```swift title="time_complexity.swift" + /* 常数阶 */ + func constant(n: Int) -> Int { + var count = 0 + let size = 100_000 + for _ in 0 ..< size { + count += 1 + } + return count + } + ``` + +=== "JS" + + ```javascript title="time_complexity.js" + /* 常数阶 */ + function constant(n) { + let count = 0; + const size = 100000; + for (let i = 0; i < size; i++) count++; + return count; + } + ``` + +=== "TS" + + ```typescript title="time_complexity.ts" + /* 常数阶 */ + function constant(n: number): number { + let count = 0; + const size = 100000; + for (let i = 0; i < size; i++) count++; + return count; + } + ``` + +=== "Dart" + + ```dart title="time_complexity.dart" + /* 常数阶 */ + 
int constant(int n) { + int count = 0; + int size = 100000; + for (var i = 0; i < size; i++) { + count++; + } + return count; + } + ``` + +=== "Rust" + + ```rust title="time_complexity.rs" + /* 常数阶 */ + fn constant(n: i32) -> i32 { + _ = n; + let mut count = 0; + let size = 100_000; + for _ in 0..size { + count += 1; + } + count + } + ``` + +=== "C" + + ```c title="time_complexity.c" + /* 常数阶 */ + int constant(int n) { + int count = 0; + int size = 100000; + int i = 0; + for (int i = 0; i < size; i++) { + count++; + } + return count; + } + ``` + +=== "Zig" + + ```zig title="time_complexity.zig" + // 常数阶 + fn constant(n: i32) i32 { + _ = n; + var count: i32 = 0; + const size: i32 = 100_000; + var i: i32 = 0; + while(iFigure 2-10 Time complexity of constant, linear and quadratic orders
+ +Taking bubble sort as an example, the outer loop executes $n - 1$ times, and the inner loop executes $n-1$, $n-2$, $\dots$, $2$, $1$ times, which averages out to $n / 2$ times, resulting in a time complexity of $O((n - 1) n / 2) = O(n^2)$ . + +=== "Python" + + ```python title="time_complexity.py" + def bubble_sort(nums: list[int]) -> int: + """平方阶(冒泡排序)""" + count = 0 # 计数器 + # 外循环:未排序区间为 [0, i] + for i in range(len(nums) - 1, 0, -1): + # 内循环:将未排序区间 [0, i] 中的最大元素交换至该区间的最右端 + for j in range(i): + if nums[j] > nums[j + 1]: + # 交换 nums[j] 与 nums[j + 1] + tmp: int = nums[j] + nums[j] = nums[j + 1] + nums[j + 1] = tmp + count += 3 # 元素交换包含 3 个单元操作 + return count + ``` + +=== "C++" + + ```cpp title="time_complexity.cpp" + /* 平方阶(冒泡排序) */ + int bubbleSort(vectorFigure 2-11 time complexity of exponential order
In practical algorithms, the exponential order often appears in recursive functions. For example, the following code recursively splits into two and stops after $n$ splits:
exp_recur(n - 1) + 1 + } + ``` + +=== "C" + + ```c title="time_complexity.c" + /* 指数阶(递归实现) */ + int expRecur(int n) { + if (n == 1) + return 1; + return expRecur(n - 1) + expRecur(n - 1) + 1; + } + ``` + +=== "Zig" + + ```zig title="time_complexity.zig" + // 指数阶(递归实现) + fn expRecur(n: i32) i32 { + if (n == 1) return 1; + return expRecur(n - 1) + expRecur(n - 1) + 1; + } + ``` + +Exponential order grows very rapidly and is more common in exhaustive methods (brute force search, backtracking, etc.). For problems with large data sizes, exponential order is unacceptable and usually requires the use of algorithms such as dynamic programming or greedy algorithms to solve. + +### 1. Logarithmic Order $O(\Log N)$ + +In contrast to the exponential order, the logarithmic order reflects the "each round is reduced to half" case. Let the input data size be $n$, and since each round is reduced to half, the number of loops is $\log_2 n$, which is the inverse function of $2^n$. + +The Figure 2-12 and the code below simulate the process of "reducing each round to half" with a time complexity of $O(\log_2 n)$, which is abbreviated as $O(\log n)$. 
+ +=== "Python" + + ```python title="time_complexity.py" + def logarithmic(n: float) -> int: + """对数阶(循环实现)""" + count = 0 + while n > 1: + n = n / 2 + count += 1 + return count + ``` + +=== "C++" + + ```cpp title="time_complexity.cpp" + /* 对数阶(循环实现) */ + int logarithmic(float n) { + int count = 0; + while (n > 1) { + n = n / 2; + count++; + } + return count; + } + ``` + +=== "Java" + + ```java title="time_complexity.java" + /* 对数阶(循环实现) */ + int logarithmic(float n) { + int count = 0; + while (n > 1) { + n = n / 2; + count++; + } + return count; + } + ``` + +=== "C#" + + ```csharp title="time_complexity.cs" + /* 对数阶(循环实现) */ + int Logarithmic(float n) { + int count = 0; + while (n > 1) { + n /= 2; + count++; + } + return count; + } + ``` + +=== "Go" + + ```go title="time_complexity.go" + /* 对数阶(循环实现)*/ + func logarithmic(n float64) int { + count := 0 + for n > 1 { + n = n / 2 + count++ + } + return count + } + ``` + +=== "Swift" + + ```swift title="time_complexity.swift" + /* 对数阶(循环实现) */ + func logarithmic(n: Double) -> Int { + var count = 0 + var n = n + while n > 1 { + n = n / 2 + count += 1 + } + return count + } + ``` + +=== "JS" + + ```javascript title="time_complexity.js" + /* 对数阶(循环实现) */ + function logarithmic(n) { + let count = 0; + while (n > 1) { + n = n / 2; + count++; + } + return count; + } + ``` + +=== "TS" + + ```typescript title="time_complexity.ts" + /* 对数阶(循环实现) */ + function logarithmic(n: number): number { + let count = 0; + while (n > 1) { + n = n / 2; + count++; + } + return count; + } + ``` + +=== "Dart" + + ```dart title="time_complexity.dart" + /* 对数阶(循环实现) */ + int logarithmic(num n) { + int count = 0; + while (n > 1) { + n = n / 2; + count++; + } + return count; + } + ``` + +=== "Rust" + + ```rust title="time_complexity.rs" + /* 对数阶(循环实现) */ + fn logarithmic(mut n: f32) -> i32 { + let mut count = 0; + while n > 1.0 { + n = n / 2.0; + count += 1; + } + count + } + ``` + +=== "C" + + ```c title="time_complexity.c" + /* 对数阶(循环实现) */ + int 
logarithmic(float n) { + int count = 0; + while (n > 1) { + n = n / 2; + count++; + } + return count; + } + ``` + +=== "Zig" + + ```zig title="time_complexity.zig" + // 对数阶(循环实现) + fn logarithmic(n: f32) i32 { + var count: i32 = 0; + var n_var = n; + while (n_var > 1) + { + n_var = n_var / 2; + count +=1; + } + return count; + } + ``` + +{ class="animation-figure" } + +Figure 2-12 time complexity of logarithmic order
Similar to the exponential order, the logarithmic order often appears in recursive functions.
+=== "Zig" + + ```zig title="time_complexity.zig" + // 对数阶(递归实现) + fn logRecur(n: f32) i32 { + if (n <= 1) return 0; + return logRecur(n / 2) + 1; + } + ``` + +Logarithmic order is often found in algorithms based on the divide and conquer strategy, which reflects the algorithmic ideas of "dividing one into many" and "simplifying the complexity into simplicity". It grows slowly and is the second most desirable time complexity after constant order. + +!!! tip "What is the base of $O(\log n)$?" + + To be precise, the corresponding time complexity of "one divided into $m$" is $O(\log_m n)$ . And by using the logarithmic permutation formula, we can get equal time complexity with different bases: + + $$ + O(\log_m n) = O(\log_k n / \log_k m) = O(\log_k n) + $$ + + That is, the base $m$ can be converted without affecting the complexity. Therefore we usually omit the base $m$ and write the logarithmic order directly as $O(\log n)$. + +### 2. Linear Logarithmic Order $O(N \Log N)$ + +The linear logarithmic order is often found in nested loops, and the time complexity of the two levels of loops is $O(\log n)$ and $O(n)$ respectively. 
The related code is as follows: + +=== "Python" + + ```python title="time_complexity.py" + def linear_log_recur(n: float) -> int: + """线性对数阶""" + if n <= 1: + return 1 + count: int = linear_log_recur(n // 2) + linear_log_recur(n // 2) + for _ in range(n): + count += 1 + return count + ``` + +=== "C++" + + ```cpp title="time_complexity.cpp" + /* 线性对数阶 */ + int linearLogRecur(float n) { + if (n <= 1) + return 1; + int count = linearLogRecur(n / 2) + linearLogRecur(n / 2); + for (int i = 0; i < n; i++) { + count++; + } + return count; + } + ``` + +=== "Java" + + ```java title="time_complexity.java" + /* 线性对数阶 */ + int linearLogRecur(float n) { + if (n <= 1) + return 1; + int count = linearLogRecur(n / 2) + linearLogRecur(n / 2); + for (int i = 0; i < n; i++) { + count++; + } + return count; + } + ``` + +=== "C#" + + ```csharp title="time_complexity.cs" + /* 线性对数阶 */ + int LinearLogRecur(float n) { + if (n <= 1) return 1; + int count = LinearLogRecur(n / 2) + LinearLogRecur(n / 2); + for (int i = 0; i < n; i++) { + count++; + } + return count; + } + ``` + +=== "Go" + + ```go title="time_complexity.go" + /* 线性对数阶 */ + func linearLogRecur(n float64) int { + if n <= 1 { + return 1 + } + count := linearLogRecur(n/2) + linearLogRecur(n/2) + for i := 0.0; i < n; i++ { + count++ + } + return count + } + ``` + +=== "Swift" + + ```swift title="time_complexity.swift" + /* 线性对数阶 */ + func linearLogRecur(n: Double) -> Int { + if n <= 1 { + return 1 + } + var count = linearLogRecur(n: n / 2) + linearLogRecur(n: n / 2) + for _ in stride(from: 0, to: n, by: 1) { + count += 1 + } + return count + } + ``` + +=== "JS" + + ```javascript title="time_complexity.js" + /* 线性对数阶 */ + function linearLogRecur(n) { + if (n <= 1) return 1; + let count = linearLogRecur(n / 2) + linearLogRecur(n / 2); + for (let i = 0; i < n; i++) { + count++; + } + return count; + } + ``` + +=== "TS" + + ```typescript title="time_complexity.ts" + /* 线性对数阶 */ + function linearLogRecur(n: number): number { + if (n 
<= 1) return 1; + let count = linearLogRecur(n / 2) + linearLogRecur(n / 2); + for (let i = 0; i < n; i++) { + count++; + } + return count; + } + ``` + +=== "Dart" + + ```dart title="time_complexity.dart" + /* 线性对数阶 */ + int linearLogRecur(num n) { + if (n <= 1) return 1; + int count = linearLogRecur(n / 2) + linearLogRecur(n / 2); + for (var i = 0; i < n; i++) { + count++; + } + return count; + } + ``` + +=== "Rust" + + ```rust title="time_complexity.rs" + /* 线性对数阶 */ + fn linear_log_recur(n: f32) -> i32 { + if n <= 1.0 { + return 1; + } + let mut count = linear_log_recur(n / 2.0) + linear_log_recur(n / 2.0); + for _ in 0 ..n as i32 { + count += 1; + } + return count + } + ``` + +=== "C" + + ```c title="time_complexity.c" + /* 线性对数阶 */ + int linearLogRecur(float n) { + if (n <= 1) + return 1; + int count = linearLogRecur(n / 2) + linearLogRecur(n / 2); + for (int i = 0; i < n; i++) { + count++; + } + return count; + } + ``` + +=== "Zig" + + ```zig title="time_complexity.zig" + // 线性对数阶 + fn linearLogRecur(n: f32) i32 { + if (n <= 1) return 1; + var count: i32 = linearLogRecur(n / 2) + linearLogRecur(n / 2); + var i: f32 = 0; + while (i < n) : (i += 1) { + count += 1; + } + return count; + } + ``` + +The Figure 2-13 shows how the linear logarithmic order is generated. The total number of operations at each level of the binary tree is $n$ , and the tree has a total of $\log_2 n + 1$ levels, resulting in a time complexity of $O(n\log n)$ . + +{ class="animation-figure" } + +Figure 2-13 Time complexity of linear logarithmic order
+ +Mainstream sorting algorithms typically have a time complexity of $O(n \log n)$ , such as quick sort, merge sort, heap sort, etc. + +### 3. The Factorial Order $O(N!)$ + +The factorial order corresponds to the mathematical "permutations problem". Given $n$ elements that do not repeat each other, find all possible permutations of them, the number of permutations being: + +$$ +n! = n \times (n - 1) \times (n - 2) \times \dots \times 2 \times 1 +$$ + +Factorials are usually implemented using recursion. As shown in the Figure 2-14 and in the code below, the first level splits $n$, the second level splits $n - 1$, and so on, until the splitting stops at the $n$th level: + +=== "Python" + + ```python title="time_complexity.py" + def factorial_recur(n: int) -> int: + """阶乘阶(递归实现)""" + if n == 0: + return 1 + count = 0 + # 从 1 个分裂出 n 个 + for _ in range(n): + count += factorial_recur(n - 1) + return count + ``` + +=== "C++" + + ```cpp title="time_complexity.cpp" + /* 阶乘阶(递归实现) */ + int factorialRecur(int n) { + if (n == 0) + return 1; + int count = 0; + // 从 1 个分裂出 n 个 + for (int i = 0; i < n; i++) { + count += factorialRecur(n - 1); + } + return count; + } + ``` + +=== "Java" + + ```java title="time_complexity.java" + /* 阶乘阶(递归实现) */ + int factorialRecur(int n) { + if (n == 0) + return 1; + int count = 0; + // 从 1 个分裂出 n 个 + for (int i = 0; i < n; i++) { + count += factorialRecur(n - 1); + } + return count; + } + ``` + +=== "C#" + + ```csharp title="time_complexity.cs" + /* 阶乘阶(递归实现) */ + int FactorialRecur(int n) { + if (n == 0) return 1; + int count = 0; + // 从 1 个分裂出 n 个 + for (int i = 0; i < n; i++) { + count += FactorialRecur(n - 1); + } + return count; + } + ``` + +=== "Go" + + ```go title="time_complexity.go" + /* 阶乘阶(递归实现) */ + func factorialRecur(n int) int { + if n == 0 { + return 1 + } + count := 0 + // 从 1 个分裂出 n 个 + for i := 0; i < n; i++ { + count += factorialRecur(n - 1) + } + return count + } + ``` + +=== "Swift" + + ```swift 
title="time_complexity.swift" + /* 阶乘阶(递归实现) */ + func factorialRecur(n: Int) -> Int { + if n == 0 { + return 1 + } + var count = 0 + // 从 1 个分裂出 n 个 + for _ in 0 ..< n { + count += factorialRecur(n: n - 1) + } + return count + } + ``` + +=== "JS" + + ```javascript title="time_complexity.js" + /* 阶乘阶(递归实现) */ + function factorialRecur(n) { + if (n === 0) return 1; + let count = 0; + // 从 1 个分裂出 n 个 + for (let i = 0; i < n; i++) { + count += factorialRecur(n - 1); + } + return count; + } + ``` + +=== "TS" + + ```typescript title="time_complexity.ts" + /* 阶乘阶(递归实现) */ + function factorialRecur(n: number): number { + if (n === 0) return 1; + let count = 0; + // 从 1 个分裂出 n 个 + for (let i = 0; i < n; i++) { + count += factorialRecur(n - 1); + } + return count; + } + ``` + +=== "Dart" + + ```dart title="time_complexity.dart" + /* 阶乘阶(递归实现) */ + int factorialRecur(int n) { + if (n == 0) return 1; + int count = 0; + // 从 1 个分裂出 n 个 + for (var i = 0; i < n; i++) { + count += factorialRecur(n - 1); + } + return count; + } + ``` + +=== "Rust" + + ```rust title="time_complexity.rs" + /* 阶乘阶(递归实现) */ + fn factorial_recur(n: i32) -> i32 { + if n == 0 { + return 1; + } + let mut count = 0; + // 从 1 个分裂出 n 个 + for _ in 0..n { + count += factorial_recur(n - 1); + } + count + } + ``` + +=== "C" + + ```c title="time_complexity.c" + /* 阶乘阶(递归实现) */ + int factorialRecur(int n) { + if (n == 0) + return 1; + int count = 0; + for (int i = 0; i < n; i++) { + count += factorialRecur(n - 1); + } + return count; + } + ``` + +=== "Zig" + + ```zig title="time_complexity.zig" + // 阶乘阶(递归实现) + fn factorialRecur(n: i32) i32 { + if (n == 0) return 1; + var count: i32 = 0; + var i: i32 = 0; + // 从 1 个分裂出 n 个 + while (i < n) : (i += 1) { + count += factorialRecur(n - 1); + } + return count; + } + ``` + +{ class="animation-figure" } + +Figure 2-14 Time complexity of the factorial order
+ +Note that since there is always $n! > 2^n$ when $n \geq 4$, the factorial order grows faster than the exponential order, and is also unacceptable when $n$ is large. + +## 2.3.6 Worst, Best, Average Time Complexity + +**The time efficiency of algorithms is often not fixed, but is related to the distribution of the input data**. Suppose an array `nums` of length $n$ is input, where `nums` consists of numbers from $1$ to $n$, each of which occurs only once; however, the order of the elements is randomly upset, and the goal of the task is to return the index of element $1$. We can draw the following conclusion. + +- When `nums = [? , ? , ... , 1]` , i.e., when the end element is $1$, a complete traversal of the array is required, **to reach the worst time complexity $O(n)$** . +- When `nums = [1, ? , ? , ...]` , i.e., when the first element is $1$ , there is no need to continue traversing the array no matter how long it is, **reaching the optimal time complexity $\Omega(1)$** . + +The "worst time complexity" corresponds to the asymptotic upper bound of the function and is denoted by the large $O$ notation. Correspondingly, the "optimal time complexity" corresponds to the asymptotic lower bound of the function and is denoted in $\Omega$ notation: + +=== "Python" + + ```python title="worst_best_time_complexity.py" + def random_numbers(n: int) -> list[int]: + """生成一个数组,元素为: 1, 2, ..., n ,顺序被打乱""" + # 生成数组 nums =: 1, 2, 3, ..., n + nums = [i for i in range(1, n + 1)] + # 随机打乱数组元素 + random.shuffle(nums) + return nums + + def find_one(nums: list[int]) -> int: + """查找数组 nums 中数字 1 所在索引""" + for i in range(len(nums)): + # 当元素 1 在数组头部时,达到最佳时间复杂度 O(1) + # 当元素 1 在数组尾部时,达到最差时间复杂度 O(n) + if nums[i] == 1: + return i + return -1 + ``` + +=== "C++" + + ```cpp title="worst_best_time_complexity.cpp" + /* 生成一个数组,元素为 { 1, 2, ..., n },顺序被打乱 */ + vectorFigure 1-1 Dictionary search step
+ +The skill of looking up a dictionary, essential for elementary school students, is actually the renowned binary search algorithm. Through the lens of data structures, we can view the dictionary as a sorted "array"; while from an algorithmic perspective, the series of operations in looking up a dictionary can be seen as "binary search". + +**Example 2: Organizing Playing Cards**. When playing cards, we need to arrange the cards in ascending order each game, as shown in the following process. + +1. Divide the playing cards into "ordered" and "unordered" parts, assuming initially that the leftmost card is already ordered. +2. Take out a card from the unordered part and insert it into the correct position in the ordered part; once completed, the leftmost two cards will be in an ordered sequence. +3. Continue the loop described in step `2.`, each iteration involving insertion of one card from the unordered segment into the ordered portion, until all cards are appropriately ordered. + +{ class="animation-figure" } + +Figure 1-2 Playing cards sorting process
+ +The above method of organizing playing cards is essentially the "insertion sort" algorithm, which is very efficient for small datasets. Many programming languages' sorting library functions include insertion sort. + +**Example 3: Making Change**. Suppose we buy goods worth $69$ yuan at a supermarket and give the cashier $100$ yuan, then the cashier needs to give us $31$ yuan in change. They would naturally complete the thought process as shown below. + +1. The options are currencies smaller than $31$, including $1$, $5$, $10$, and $20$. +2. Take out the largest $20$ from the options, leaving $31 - 20 = 11$. +3. Take out the largest $10$ from the remaining options, leaving $11 - 10 = 1$. +4. Take out the largest $1$ from the remaining options, leaving $1 - 1 = 0$. +5. Complete the change-making, with the solution being $20 + 10 + 1 = 31$. + +{ class="animation-figure" } + +Figure 1-3 Change making process
+ +In the aforementioned steps, at each stage, we make the optimal choice (utilizing the highest denomination possible), ultimately deriving at a feasible change-making approach. From the perspective of data structures and algorithms, this approach is essentially a "greedy" algorithm. + +From preparing a dish to traversing interstellar realms, virtually every problem-solving endeavor relies on algorithms. The emergence of computers enables us to store data structures in memory and write code to call CPUs and GPUs to execute algorithms. Consequently, we can transfer real-life predicaments to computers, efficiently addressing a myriad of complex issues. + +!!! tip + + If concepts such as data structures, algorithms, arrays, and binary search still seem somewhat obsecure, I encourage you to continue reading. This book will gently guide you into the realm of understanding data structures and algorithms. diff --git a/docs-en/chapter_introduction/index.md b/docs-en/chapter_introduction/index.md new file mode 100644 index 000000000..d7e0b32c8 --- /dev/null +++ b/docs-en/chapter_introduction/index.md @@ -0,0 +1,24 @@ +--- +comments: true +icon: material/calculator-variant-outline +--- + +# 第 1 章 Introduction to Algorithms + +Figure 1-4 Relationship between data structures and algorithms
+ +Data structures and algorithms can be likened to a set of building blocks, as illustrated in the Figure 1-5 . A building block set includes numerous pieces, accompanied by detailed assembly instructions. Following these instructions step by step allows us to construct an intricate block model. + +{ class="animation-figure" } + +Figure 1-5 Assembling blocks
+ +The detailed correspondence between the two is shown in the Table 1-1 . + +Table 1-1 Comparing Data Structures and Algorithms to Building Blocks
+ +Figure 0-1 Hello Algo content structure
+ +## 0.1.3 Acknowledgements + +During the creation of this book, I received help from many people, including but not limited to: + +- Thank you to my mentor at the company, Dr. Shih Lee, for encouraging me to "get moving" during one of our conversations, which strengthened my resolve to write this book. +- I would like to thank my girlfriend Bubbles for being the first reader of this book, and for making many valuable suggestions from the perspective of an algorithm whiz, making this book more suitable for newbies. +- Thanks to Tengbao, Qibao, and Feibao for coming up with a creative name for this book that evokes fond memories of writing the first line of code "Hello World!". +- Thanks to Sutong for designing the beautiful cover and logo for this book and patiently revising it many times under my OCD. +- Thanks to @squidfunk for writing layout suggestions and for developing the open source documentation theme [Material-for-MkDocs](https://github.com/squidfunk/mkdocs-material/tree/master). + +During the writing process, I read many textbooks and articles on data structures and algorithms. These works provide excellent models for this book and ensure the accuracy and quality of its contents. I would like to thank all my teachers and predecessors for their outstanding contributions! + +This book promotes a hands-on approach to learning, and in this respect is heavily inspired by ["Hands-On Learning for Depth"](https://github.com/d2l-ai/d2l-zh). I highly recommend this excellent book to all readers. + +A heartfelt thank you to my parents, it is your constant support and encouragement that gives me the opportunity to do this fun-filled thing. diff --git a/docs-en/chapter_preface/index.md b/docs-en/chapter_preface/index.md new file mode 100644 index 000000000..44ac8bfcc --- /dev/null +++ b/docs-en/chapter_preface/index.md @@ -0,0 +1,24 @@ +--- +comments: true +icon: material/book-open-outline +--- + +# 第 0 章 Preface + +Figure 0-2 Example animation
+ +## 0.2.3 Deeper Understanding In Code Practice + +The companion code for this book is hosted in the [GitHub repository](https://github.com/krahets/hello-algo). As shown in the Figure 0-3 , **the source code is accompanied by test samples that can be run with a single click**. + +If time permits, **it is recommended that you refer to the code and knock it through on your own**. If you have limited time to study, please read through and run all the code at least once. + +The process of writing code is often more rewarding than reading it. **Learning by doing is really learning**. + +{ class="animation-figure" } + +Figure 0-3 Running code example
+ +The preliminaries for running the code are divided into three main steps. + +**Step 1: Install the local programming environment**. Please refer to [Appendix Tutorial](https://www.hello-algo.com/chapter_appendix/installation/) for installation, or skip this step if already installed. + +**Step 2: Clone or download the code repository**. If [Git](https://git-scm.com/downloads) is already installed, you can clone this repository with the following command. + +```shell +git clone https://github.com/krahets/hello-algo.git +``` + +Of course, you can also in the location shown in the Figure 0-4 , click "Download ZIP" directly download the code zip, and then in the local solution. + +{ class="animation-figure" } + +Figure 0-4 Clone repository with download code
+ +**Step 3: Run the source code**. As shown in the Figure 0-5 , for the code block labeled with the file name at the top, we can find the corresponding source code file in the `codes` folder of the repository. The source code files can be run with a single click, which will help you save unnecessary debugging time and allow you to focus on what you are learning. + +{ class="animation-figure" } + +Figure 0-5 Code block with corresponding source file
+ +## 0.2.4 Growing Together In Questioning And Discussion + +While reading this book, please don't skip over the points that you didn't learn. **Feel free to ask your questions in the comment section**. We will be happy to answer them and can usually respond within two days. + +As you can see in the Figure 0-6 , each post comes with a comment section at the bottom. I hope you'll pay more attention to the comments section. On the one hand, you can learn about the problems that people encounter, so as to check the gaps and stimulate deeper thinking. On the other hand, we expect you to generously answer other partners' questions, share your insights, and help others improve. + +{ class="animation-figure" } + +Figure 0-6 Example of comment section
+ +## 0.2.5 Algorithm Learning Route + +From a general point of view, we can divide the process of learning data structures and algorithms into three stages. + +1. **Introduction to Algorithms**. We need to familiarize ourselves with the characteristics and usage of various data structures and learn about the principles, processes, uses and efficiency of different algorithms. +2. **Brush up on algorithm questions**. It is recommended to start brushing from popular topics, such as [Sword to Offer](https://leetcode.cn/studyplan/coding-interviews/) and [LeetCode Hot 100](https://leetcode.cn/studyplan/top-100- liked/), first accumulate at least 100 questions to familiarize yourself with mainstream algorithmic problems. Forgetfulness can be a challenge when first brushing up, but rest assured that this is normal. We can follow the "Ebbinghaus Forgetting Curve" to review the questions, and usually after 3-5 rounds of repetitions, we will be able to memorize them. +3. **Build the knowledge system**. In terms of learning, we can read algorithm column articles, solution frameworks and algorithm textbooks to continuously enrich the knowledge system. In terms of brushing, we can try to adopt advanced brushing strategies, such as categorizing by topic, multiple solutions, multiple solutions, etc. Related brushing tips can be found in various communities. + +As shown in the Figure 0-7 , this book mainly covers "Phase 1" and is designed to help you start Phase 2 and 3 more efficiently. + +{ class="animation-figure" } + +Figure 0-7 algorithm learning route
diff --git a/docs-en/chapter_preface/summary.md b/docs-en/chapter_preface/summary.md new file mode 100644 index 000000000..4a364c76c --- /dev/null +++ b/docs-en/chapter_preface/summary.md @@ -0,0 +1,12 @@ +--- +comments: true +--- + +# 0.3 Summary + +- The main audience of this book is algorithm beginners. If you already have some basic knowledge, this book can help you systematically review your algorithm knowledge, and the source code in this book can also be used as a "Coding Toolkit". +- The book consists of three main sections, Complexity Analysis, Data Structures, and Algorithms, covering most of the topics in the field. +- For newcomers to algorithms, it is crucial to read an introductory book at the beginning stage to avoid detours and common pitfalls. +- Animations and figures within the book are usually used to introduce key points and difficult concepts. These should be given more attention when reading the book. +- Practice is the best way to learn programming. It is highly recommended that you run the source code and type in the code yourself. +- Each chapter in the web version of this book features a discussion forum, and you are welcome to share your questions and insights at any time. 
\ No newline at end of file diff --git a/docs-en/index.assets/btn_chinese_edition.svg b/docs-en/index.assets/btn_chinese_edition.svg new file mode 100644 index 000000000..f8a4b21d7 --- /dev/null +++ b/docs-en/index.assets/btn_chinese_edition.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs-en/index.assets/btn_chinese_edition_dark.svg b/docs-en/index.assets/btn_chinese_edition_dark.svg new file mode 100644 index 000000000..e96c12f38 --- /dev/null +++ b/docs-en/index.assets/btn_chinese_edition_dark.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs-en/index.assets/btn_download_pdf.svg b/docs-en/index.assets/btn_download_pdf.svg new file mode 100644 index 000000000..e486da953 --- /dev/null +++ b/docs-en/index.assets/btn_download_pdf.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs-en/index.assets/btn_download_pdf_dark.svg b/docs-en/index.assets/btn_download_pdf_dark.svg new file mode 100644 index 000000000..99266fde5 --- /dev/null +++ b/docs-en/index.assets/btn_download_pdf_dark.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs-en/index.assets/btn_read_online.svg b/docs-en/index.assets/btn_read_online.svg new file mode 100644 index 000000000..ba1d2c684 --- /dev/null +++ b/docs-en/index.assets/btn_read_online.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs-en/index.assets/btn_read_online_dark.svg b/docs-en/index.assets/btn_read_online_dark.svg new file mode 100644 index 000000000..debaba56c --- /dev/null +++ b/docs-en/index.assets/btn_read_online_dark.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs-en/index.md b/docs-en/index.md new file mode 100644 index 000000000..305ff4c89 --- /dev/null +++ b/docs-en/index.md @@ -0,0 +1,173 @@ +--- +comments: true +glightbox: false +hide: + - footer + - toc + - edit +--- + +Data Structures and Algorithms Crash Course with Animated Illustrations and Off-the-Shelf Code
+ + + ++ + + Dive In + + + + Clone Repo + + + + Get PDF + +
+ +
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Quote
+"An easy-to-understand book on data structures and algorithms, which guides readers to learn by minds-on and hands-on. Strongly recommended for algorithm beginners!"
+—— Junhui Deng, Professor of Computer Science, Tsinghua University
+Quote
+"If I had 'Hello Algo' when I was learning data structures and algorithms, it would have been 10 times easier!"
+—— Mu Li, Senior Principal Scientist, Amazon
+Easy to understandSmooth learning curve
+"A picture is worth a thousand words."
+Multi programming languagesRun with one click
+"Talk is cheap. Show me the code."
+Discussion and questions welcomeReaders progress together
+"Chase the wind and moon, never stopping"
+"Beyond the plains, there are spring mountains"
+
+
+
+
+