Better names for variables [closes #18]
parent baee83266a
commit 11564fd4d3
@@ -480,7 +480,7 @@ efficiently implemented using them.
A convenient way to store the adjacency lists is to declare
an array of vectors as follows:
\begin{lstlisting}
-vector<int> v[N];
+vector<int> adj[N];
\end{lstlisting}

The constant $N$ is chosen so that all

@@ -503,11 +503,11 @@ For example, the graph
\end{center}
can be stored as follows:
\begin{lstlisting}
-v[1].push_back(2);
-v[2].push_back(3);
-v[2].push_back(4);
-v[3].push_back(4);
-v[4].push_back(1);
+adj[1].push_back(2);
+adj[2].push_back(3);
+adj[2].push_back(4);
+adj[3].push_back(4);
+adj[4].push_back(1);
\end{lstlisting}

If the graph is undirected, it can be stored in a similar way,

@@ -517,7 +517,7 @@ For a weighted graph, the structure can be extended
as follows:

\begin{lstlisting}
-vector<pair<int,int>> v[N];
+vector<pair<int,int>> adj[N];
\end{lstlisting}

If there is an edge from node $a$ to node $b$

@@ -541,11 +541,11 @@ For example, the graph
\end{center}
can be stored as follows:
\begin{lstlisting}
-v[1].push_back({2,5});
-v[2].push_back({3,7});
-v[2].push_back({4,6});
-v[3].push_back({4,5});
-v[4].push_back({1,2});
+adj[1].push_back({2,5});
+adj[2].push_back({3,7});
+adj[2].push_back({4,6});
+adj[3].push_back({4,5});
+adj[4].push_back({1,2});
\end{lstlisting}

The benefit in using adjacency lists is that

@@ -555,7 +555,7 @@ For example, the following loop goes through all nodes
to which we can move from node $s$:

\begin{lstlisting}
-for (auto u : v[s]) {
+for (auto u : adj[s]) {
  // process node u
}
\end{lstlisting}
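
For a weighted list of type \texttt{vector<pair<int,int>> adj[N]}, the same kind of loop works; the following is a minimal sketch (assuming, as above, that each pair stores the target node first and the weight second) of going through the edges that start at node $s$:
\begin{lstlisting}
for (auto u : adj[s]) {
  int b = u.first, w = u.second;
  // there is an edge from node s to node b with weight w
}
\end{lstlisting}
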
@@ -570,14 +570,14 @@ We can efficiently check from an adjacency matrix
if there is an edge between two nodes.
The matrix can be stored as an array
\begin{lstlisting}
-int v[N][N];
+int mat[N][N];
\end{lstlisting}
-where each value $\texttt{v}[a][b]$ indicates
+where each value $\texttt{mat}[a][b]$ indicates
whether the graph contains an edge from
node $a$ to node $b$.
If the edge is included in the graph,
-then $\texttt{v}[a][b]=1$,
-and otherwise $\texttt{v}[a][b]=0$.
+then $\texttt{mat}[a][b]=1$,
+and otherwise $\texttt{mat}[a][b]=0$.
For example, the graph
\begin{center}
\begin{tikzpicture}[scale=0.9]

@@ -696,7 +696,7 @@ at a given node.

The edge list can be stored in a vector
\begin{lstlisting}
-vector<pair<int,int>> v;
+vector<pair<int,int>> edges;
\end{lstlisting}
where each pair $(a,b)$ denotes that
there is an edge from node $a$ to node $b$.
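
In a program, the list is typically filled from the input; the following is a small illustrative sketch (the variable names \texttt{n}, \texttt{m}, \texttt{a} and \texttt{b} are assumptions, not part of the original code), assuming the input first gives the number of nodes and edges and then the edges themselves:
\begin{lstlisting}
int n, m;
cin >> n >> m;
for (int i = 0; i < m; i++) {
  int a, b;
  cin >> a >> b;
  edges.push_back({a,b});
}
\end{lstlisting}
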
@@ -718,18 +718,18 @@ Thus, the graph
\end{center}
can be represented as follows:
\begin{lstlisting}
-v.push_back({1,2});
-v.push_back({2,3});
-v.push_back({2,4});
-v.push_back({3,4});
-v.push_back({4,1});
+edges.push_back({1,2});
+edges.push_back({2,3});
+edges.push_back({2,4});
+edges.push_back({3,4});
+edges.push_back({4,1});
\end{lstlisting}

\noindent
If the graph is weighted, the structure can
be extended as follows:
\begin{lstlisting}
-vector<tuple<int,int,int>> v;
+vector<tuple<int,int,int>> edges;
\end{lstlisting}
Each element in this list is of the
form $(a,b,w)$, which means that there

@@ -753,10 +753,10 @@ For example, the graph
\begin{samepage}
can be represented as follows:
\begin{lstlisting}
-v.push_back(make_tuple(1,2,5));
-v.push_back(make_tuple(2,3,7));
-v.push_back(make_tuple(2,4,6));
-v.push_back(make_tuple(3,4,5));
-v.push_back(make_tuple(4,1,2));
+edges.push_back({1,2,5});
+edges.push_back({2,3,7});
+edges.push_back({2,4,6});
+edges.push_back({3,4,5});
+edges.push_back({4,1,2});
\end{lstlisting}
\end{samepage}
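
The tuples can later be unpacked when iterating over the list; a minimal sketch (using \texttt{tie} from the \texttt{<tuple>} header, and assuming the \texttt{edges} vector declared above):
\begin{lstlisting}
for (auto e : edges) {
  int a, b, w;
  tie(a, b, w) = e;
  // there is an edge from node a to node b with weight w
}
\end{lstlisting}
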
@@ -129,23 +129,23 @@ a depth-first search at a given node.
The function assumes that the graph is
stored as adjacency lists in an array
\begin{lstlisting}
-vector<int> v[N];
+vector<int> adj[N];
\end{lstlisting}
and also maintains an array
\begin{lstlisting}
-bool visited[N];
+bool vis[N];
\end{lstlisting}
that keeps track of the visited nodes.
Initially, each array value is \texttt{false},
and when the search arrives at node $s$,
-the value of \texttt{visited}[$s$] becomes \texttt{true}.
+the value of \texttt{vis}[$s$] becomes \texttt{true}.
The function can be implemented as follows:
\begin{lstlisting}
void dfs(int s) {
-  if (visited[s]) return;
-  visited[s] = true;
+  if (vis[s]) return;
+  vis[s] = true;
  // process node s
-  for (auto u: v[s]) {
+  for (auto u: adj[s]) {
    dfs(u);
  }
}
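
The function can be called once for every node to process the whole graph even if it is not connected; for example, the following sketch (the variable \texttt{components} is an illustrative name, and the nodes are assumed to be numbered $1,2,\ldots,n$) counts the connected components of the graph:
\begin{lstlisting}
int components = 0;
for (int i = 1; i <= n; i++) {
  if (!vis[i]) {
    components++;
    dfs(i);
  }
}
\end{lstlisting}
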
@@ -313,8 +313,8 @@ as adjacency lists and maintains the following
data structures:
\begin{lstlisting}
queue<int> q;
-bool visited[N];
-int distance[N];
+bool vis[N];
+int dist[N];
\end{lstlisting}

The queue \texttt{q}

@@ -323,24 +323,24 @@ of their distance.
New nodes are always added to the end
of the queue, and the node at the beginning
of the queue is the next node to be processed.
-The array \texttt{visited} indicates
+The array \texttt{vis} indicates
which nodes the search has already visited,
-and the array \texttt{distance} will contain the
+and the array \texttt{dist} will contain the
distances to all nodes in the graph.

The search can be implemented as follows,
starting at node $x$:
\begin{lstlisting}
-visited[x] = true;
-distance[x] = 0;
+vis[x] = true;
+dist[x] = 0;
q.push(x);
while (!q.empty()) {
  int s = q.front(); q.pop();
  // process node s
-  for (auto u : v[s]) {
-    if (visited[u]) continue;
-    visited[u] = true;
-    distance[u] = distance[s]+1;
+  for (auto u : adj[s]) {
+    if (vis[u]) continue;
+    vis[u] = true;
+    dist[u] = dist[s]+1;
    q.push(u);
  }
}
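
After the search, \texttt{dist}[$y$] is the smallest number of edges on a path from $x$ to a node $y$, and \texttt{vis}[$y$] tells whether $y$ was reached at all. A small illustrative usage sketch (the node $y$ is an arbitrary example, not part of the original code):
\begin{lstlisting}
if (vis[y]) {
  cout << "distance from " << x << " to " << y << " is " << dist[y] << "\n";
} else {
  cout << "no path from " << x << " to " << y << "\n";
}
\end{lstlisting}
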
chapter13.tex

@@ -193,12 +193,10 @@ The following implementation of the
Bellman–Ford algorithm finds the shortest distances
from a node $x$ to all other nodes in the graph.
The code assumes that the graph is stored
-as adjacency lists in an array
-\begin{lstlisting}
-vector<pair<int,int>> v[N];
-\end{lstlisting}
-as pairs of the form $(x,w)$:
-there is an edge to node $x$ with weight $w$.
+as an edge list \texttt{edges}
+that consists of tuples of the form $(a,b,w)$,
+meaning that there is an edge from node $a$ to node $b$
+with weight $w$.

The algorithm consists of $n-1$ rounds,
and on each round the algorithm goes through

@@ -210,14 +208,13 @@ to all nodes in the graph.
The constant \texttt{INF} denotes an infinite distance.

\begin{lstlisting}
-for (int i = 1; i <= n; i++) distance[i] = INF;
-distance[x] = 0;
+for (int i = 1; i <= n; i++) dist[i] = INF;
+dist[x] = 0;
for (int i = 1; i <= n-1; i++) {
-  for (int a = 1; a <= n; a++) {
-    for (auto b : v[a]) {
-      distance[b.first] = min(distance[b.first],
-                              distance[a]+b.second);
-    }
+  for (auto e : edges) {
+    int a, b, w;
+    tie(a, b, w) = e;
+    dist[b] = min(dist[b], dist[a]+w);
  }
}
\end{lstlisting}
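
If the code is run for one more round over the edge list and some distance still decreases, the graph contains a negative cycle that is reachable from node $x$. The following is a hedged sketch of this check, reusing \texttt{edges} and \texttt{dist} from above (the flag name is illustrative):
\begin{lstlisting}
bool negative_cycle = false;
for (auto e : edges) {
  int a, b, w;
  tie(a, b, w) = e;
  if (dist[a] != INF && dist[a]+w < dist[b]) {
    negative_cycle = true;
  }
}
\end{lstlisting}
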
@@ -298,29 +295,29 @@ Then, the algorithm always processes the
first node in the queue, and when an edge
$a \rightarrow b$ reduces a distance,
node $b$ is added to the queue.

-The following implementation uses a
-\texttt{queue} \texttt{q}.
-In addition, an array \texttt{inqueue} indicates
-if a node is already in the queue,
-in which case the algorithm does not add
-the node to the queue again.
-
-\begin{lstlisting}
-for (int i = 1; i <= n; i++) distance[i] = INF;
-distance[x] = 0;
-q.push(x);
-while (!q.empty()) {
-  int a = q.front(); q.pop();
-  inqueue[a] = false;
-  for (auto b : v[a]) {
-    if (distance[a]+b.second < distance[b.first]) {
-      distance[b.first] = distance[a]+b.second;
-      if (!inqueue[b]) {q.push(b); inqueue[b] = true;}
-    }
-  }
-}
-\end{lstlisting}
+%
+% The following implementation uses a
+% \texttt{queue} \texttt{q}.
+% In addition, an array \texttt{inqueue} indicates
+% if a node is already in the queue,
+% in which case the algorithm does not add
+% the node to the queue again.
+%
+% \begin{lstlisting}
+% for (int i = 1; i <= n; i++) distance[i] = INF;
+% distance[x] = 0;
+% q.push(x);
+% while (!q.empty()) {
+%   int a = q.front(); q.pop();
+%   inqueue[a] = false;
+%   for (auto b : v[a]) {
+%     if (distance[a]+b.second < distance[b.first]) {
+%       distance[b.first] = distance[a]+b.second;
+%       if (!inqueue[b]) {q.push(b); inqueue[b] = true;}
+%     }
+%   }
+% }
+% \end{lstlisting}

The efficiency of the SPFA algorithm depends
on the structure of the graph:

@@ -542,8 +539,10 @@ compensates the previous large weight $6$.
The following implementation of Dijkstra's algorithm
calculates the minimum distances from a node $x$
to all other nodes.
-The graph is stored in an array \texttt{v}
-as adjacency lists like in the Bellman–Ford algorithm.
+The graph is stored as adjacency lists
+so that \texttt{adj[$a$]} contains a pair $(b,w)$
+always when there is an edge from node $a$ to node $b$
+with weight $w$.

An efficient implementation of Dijkstra's algorithm
requires that it is possible to efficiently find the

@@ -553,43 +552,41 @@ that contains the nodes ordered by their distances.
Using a priority queue, the next node to be processed
can be retrieved in logarithmic time.

-In the following implementation,
-the priority queue contains pairs whose first
-element is the current distance to the node and second
-element is the identifier of the node.
-\begin{lstlisting}
-priority_queue<pair<int,int>> q;
-\end{lstlisting}
-A small difficulty is that in Dijkstra's algorithm,
-we should find the node with the \emph{minimum} distance,
-while the C++ priority queue finds the \emph{maximum}
-element by default.
-An easy trick is to use \emph{negative} distances,
-which allows us to directly use the C++ priority queue.
-
-The code keeps track of processed nodes
-in an array \texttt{ready},
-and maintains the distances in an array \texttt{distance}.
-Initially, the distance to the starting node is 0,
-and the distance to all other nodes is infinite.
+The following implementation uses a priority queue
+\texttt{q} that contains pairs of the form $(-d,x)$:
+the current distance to node $x$ is $d$.
+The array $\texttt{dist}$ contains the distance to
+each node, and the array $\texttt{ready}$ indicates
+whether a node has been processed.
+Initially the distance is $0$ to $x$ and $\infty$ to all other nodes.

\begin{lstlisting}
-for (int i = 1; i <= n; i++) distance[i] = INF;
-distance[x] = 0;
+for (int i = 1; i <= n; i++) dist[i] = INF;
+dist[x] = 0;
q.push({0,x});
while (!q.empty()) {
  int a = q.top().second; q.pop();
  if (ready[a]) continue;
  ready[a] = true;
-  for (auto b : v[a]) {
-    if (distance[a]+b.second < distance[b.first]) {
-      distance[b.first] = distance[a]+b.second;
-      q.push({-distance[b.first],b.first});
+  for (auto u : adj[a]) {
+    int b = u.first, w = u.second;
+    if (dist[a]+w < dist[b]) {
+      dist[b] = dist[a]+w;
+      q.push({-dist[b],b});
    }
  }
}
\end{lstlisting}

+Note that the priority queue contains \emph{negative}
+distances to nodes.
+The reason for this is that the C++ priority queue finds the \emph{maximum}
+element by default while we would like to find \emph{minimum} elements.
+By using negative distances,
+we can directly use the default version of the C++ priority queue\footnote{Of
+course, we could also declare the priority queue as in Chapter 4.5
+and use positive distances, but the implementation would be a bit longer.}.
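
As the footnote mentions, the queue could instead be declared as a min-priority queue, in which case positive distances can be stored directly. A minimal sketch of this alternative declaration (not part of the original code):
\begin{lstlisting}
priority_queue<pair<int,int>, vector<pair<int,int>>,
               greater<pair<int,int>>> q;
// now q.top() is the pair with the smallest first element,
// so pairs of the form {dist[b],b} can be pushed as such
\end{lstlisting}
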
The time complexity of the above implementation is
$O(n+m \log m)$ because the algorithm goes through
all nodes in the graph and adds for each edge

@@ -761,17 +758,17 @@ The advantage of the
Floyd–Warshall algorithm is that it is
easy to implement.
The following code constructs a
-distance matrix \texttt{d} where $\texttt{d}[a][b]$
+distance matrix where $\texttt{dist}[a][b]$
is the shortest distance between nodes $a$ and $b$.
-First, the algorithm initializes \texttt{d}
-using the adjacency matrix \texttt{v} of the graph:
+First, the algorithm initializes \texttt{dist}
+using the adjacency matrix \texttt{mat} of the graph:

\begin{lstlisting}
for (int i = 1; i <= n; i++) {
  for (int j = 1; j <= n; j++) {
-    if (i == j) d[i][j] = 0;
-    else if (v[i][j]) d[i][j] = v[i][j];
-    else d[i][j] = INF;
+    if (i == j) dist[i][j] = 0;
+    else if (mat[i][j]) dist[i][j] = mat[i][j];
+    else dist[i][j] = INF;
  }
}
\end{lstlisting}

@@ -782,7 +779,7 @@ After this, the shortest distances can be found as follows:
for (int k = 1; k <= n; k++) {
  for (int i = 1; i <= n; i++) {
    for (int j = 1; j <= n; j++) {
-      d[i][j] = min(d[i][j], d[i][k]+d[k][j]);
+      dist[i][j] = min(dist[i][j], dist[i][k]+dist[k][j]);
    }
  }
}
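
After the loops, every shortest distance can be read directly from the matrix; a small illustrative query sketch (the nodes $a$ and $b$ are arbitrary examples), where the value \texttt{INF} means that there is no path:
\begin{lstlisting}
if (dist[a][b] < INF) {
  cout << "shortest distance: " << dist[a][b] << "\n";
} else {
  cout << "no path from " << a << " to " << b << "\n";
}
\end{lstlisting}
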
@@ -790,7 +787,7 @@ for (int k = 1; k <= n; k++) {

The time complexity of the algorithm is $O(n^3)$,
because it contains three nested loops
-that go through the nodes in the graph.
+that go through the nodes of the graph.

Since the implementation of the Floyd–Warshall
algorithm is simple, the algorithm can be

@@ -101,7 +101,7 @@ The following recursive function can be used:
\begin{lstlisting}
void dfs(int s, int e) {
  // process node s
-  for (auto u : v[s]) {
+  for (auto u : adj[s]) {
    if (u != e) dfs(u, s);
  }
}
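
The search can be started from an arbitrary root $x$ with the call \texttt{dfs(x, 0)}, assuming the nodes are numbered $1,2,\ldots,n$ so that $0$ is never a real node. The same pattern can also carry extra information; the following is a small illustrative variant (the array \texttt{depth} is an assumption, not part of the original code) that records the depth of each node:
\begin{lstlisting}
int depth[N];

void dfs(int s, int e, int d) {
  depth[s] = d;
  for (auto u : adj[s]) {
    if (u != e) dfs(u, s, d+1);
  }
}

// usage: dfs(x, 0, 0) computes all depths with node x as the root
\end{lstlisting}
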
@@ -145,7 +145,7 @@ recursively using the following code:
\begin{lstlisting}
void dfs(int s, int e) {
  count[s] = 1;
-  for (auto u : v[s]) {
+  for (auto u : adj[s]) {
    if (u == e) continue;
    dfs(u, s);
    count[s] += count[u];