go home Home | Main Page | Modules | Namespace List | Class Hierarchy | Alphabetical List | Data Structures | File List | Namespace Members | Data Fields | Globals | Related Pages
vnl_adjugate_fixed.h
Go to the documentation of this file.
1// This is core/vnl/vnl_adjugate_fixed.h
2#ifndef vnl_adjugate_fixed_h_
3#define vnl_adjugate_fixed_h_
4//:
5// \file
6// \brief Calculates adjugate or cofactor matrix of a small vnl_matrix_fixed.
7// Code is only a small adaptation from Peter Vanroose's vnl_inverse.h
8// adjugate == inverse * det   (equivalently: inverse == adjugate / det)
9// cofactor == adjugate^T
10// \author Floris Berendsen
11// \date 18 April 2013
12//
13// \verbatim
14// Code is only a small adaptation from Peter Vanroose's vnl_inverse.h
15// \endverbatim
16
17#include <vnl/vnl_matrix_fixed.h>
18#include <vnl/vnl_vector_fixed.h>
19#include <vnl/vnl_matrix.h>
20#include <vnl/vnl_det.h>
21
22#include <cassert>
23
24//: Calculates adjugate of a small vnl_matrix_fixed (not using svd)
25// This allows you to write e.g.
26//
27// x = vnl_adjugate(A) * b;
28//
29// Note that this function is fully inlined (unlike vnl_inverse, the code
30// below contains no call to vnl_det()), which makes it much faster than
31// computing the adjugate via the svd-based matrix classes in vnl/algo.
32//
33// \relatesalso vnl_matrix_fixed
34
35template <class T>
36vnl_matrix_fixed<T, 1, 1> vnl_adjugate(vnl_matrix_fixed<T, 1, 1> const & m)
37{
38 return vnl_matrix_fixed<T, 1, 1>(m(0, 0));
39}
40
41//: Calculates adjugate of a small vnl_matrix_fixed (not using svd)
42// This allows you to write e.g.
43//
44// x = vnl_adjugate(A) * b;
45//
46// Note that this function is inlined (except for the call to vnl_det()),
47// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
48// since that one is using svd.
49//
50// \relatesalso vnl_matrix_fixed
51
52template <class T>
53vnl_matrix_fixed<T, 2, 2> vnl_adjugate(vnl_matrix_fixed<T, 2, 2> const & m)
54{
55 T d[4];
56 d[0] = m(1, 1);
57 d[1] = -m(0, 1);
58 d[3] = m(0, 0);
59 d[2] = -m(1, 0);
60 return vnl_matrix_fixed<T, 2, 2>(d);
61}
62
63//: Calculates adjugate of a small vnl_matrix_fixed (not using svd)
64// This allows you to write e.g.
65//
66// x = vnl_adjugate_fixed(A) * b;
67//
68// Note that this function is inlined (except for the call to vnl_det()),
69// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
70// since that one is using svd.
71//
72// \relatesalso vnl_matrix_fixed
73
74template <class T>
75vnl_matrix_fixed<T, 3, 3> vnl_adjugate(vnl_matrix_fixed<T, 3, 3> const & m)
76{
77 T d[9];
78 d[0] = (m(1, 1) * m(2, 2) - m(1, 2) * m(2, 1));
79 d[1] = (m(2, 1) * m(0, 2) - m(2, 2) * m(0, 1));
80 d[2] = (m(0, 1) * m(1, 2) - m(0, 2) * m(1, 1));
81 d[3] = (m(1, 2) * m(2, 0) - m(1, 0) * m(2, 2));
82 d[4] = (m(0, 0) * m(2, 2) - m(0, 2) * m(2, 0));
83 d[5] = (m(1, 0) * m(0, 2) - m(1, 2) * m(0, 0));
84 d[6] = (m(1, 0) * m(2, 1) - m(1, 1) * m(2, 0));
85 d[7] = (m(0, 1) * m(2, 0) - m(0, 0) * m(2, 1));
86 d[8] = (m(0, 0) * m(1, 1) - m(0, 1) * m(1, 0));
87 return vnl_matrix_fixed<T, 3, 3>(d);
88}
89
90//: Calculates adjugate_fixed of a small vnl_matrix_fixed (not using svd)
91// This allows you to write e.g.
92//
93// x = vnl_adjugate_fixed(A) * b;
94//
95// Note that this function is inlined (except for the call to vnl_det()),
96// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
97// since that one is using svd.
98//
99// \relatesalso vnl_matrix_fixed
100
template <class T>
vnl_matrix_fixed<T, 4, 4> vnl_adjugate(vnl_matrix_fixed<T, 4, 4> const & m)
{
  // Adjugate (classical adjoint) of a 4x4 matrix, written out explicitly.
  // d holds the result in row-major order: d[4*i+j] is the (j,i) cofactor
  // of m (a signed 3x3 minor determinant expanded into six products), i.e.
  // the result is the transposed cofactor matrix, so that
  //   vnl_adjugate(m) * m == det(m) * I.
  T d[16];
  // d[0..3]: first row of the adjugate == cofactors of m's first column.
  d[0] = m(1, 1) * m(2, 2) * m(3, 3) - m(1, 1) * m(2, 3) * m(3, 2) - m(2, 1) * m(1, 2) * m(3, 3) +
         m(2, 1) * m(1, 3) * m(3, 2) + m(3, 1) * m(1, 2) * m(2, 3) - m(3, 1) * m(1, 3) * m(2, 2);
  d[1] = -m(0, 1) * m(2, 2) * m(3, 3) + m(0, 1) * m(2, 3) * m(3, 2) + m(2, 1) * m(0, 2) * m(3, 3) -
         m(2, 1) * m(0, 3) * m(3, 2) - m(3, 1) * m(0, 2) * m(2, 3) + m(3, 1) * m(0, 3) * m(2, 2);
  d[2] = m(0, 1) * m(1, 2) * m(3, 3) - m(0, 1) * m(1, 3) * m(3, 2) - m(1, 1) * m(0, 2) * m(3, 3) +
         m(1, 1) * m(0, 3) * m(3, 2) + m(3, 1) * m(0, 2) * m(1, 3) - m(3, 1) * m(0, 3) * m(1, 2);
  d[3] = -m(0, 1) * m(1, 2) * m(2, 3) + m(0, 1) * m(1, 3) * m(2, 2) + m(1, 1) * m(0, 2) * m(2, 3) -
         m(1, 1) * m(0, 3) * m(2, 2) - m(2, 1) * m(0, 2) * m(1, 3) + m(2, 1) * m(0, 3) * m(1, 2);
  // d[4..7]: second row == cofactors of m's second column.
  d[4] = -m(1, 0) * m(2, 2) * m(3, 3) + m(1, 0) * m(2, 3) * m(3, 2) + m(2, 0) * m(1, 2) * m(3, 3) -
         m(2, 0) * m(1, 3) * m(3, 2) - m(3, 0) * m(1, 2) * m(2, 3) + m(3, 0) * m(1, 3) * m(2, 2);
  d[5] = m(0, 0) * m(2, 2) * m(3, 3) - m(0, 0) * m(2, 3) * m(3, 2) - m(2, 0) * m(0, 2) * m(3, 3) +
         m(2, 0) * m(0, 3) * m(3, 2) + m(3, 0) * m(0, 2) * m(2, 3) - m(3, 0) * m(0, 3) * m(2, 2);
  d[6] = -m(0, 0) * m(1, 2) * m(3, 3) + m(0, 0) * m(1, 3) * m(3, 2) + m(1, 0) * m(0, 2) * m(3, 3) -
         m(1, 0) * m(0, 3) * m(3, 2) - m(3, 0) * m(0, 2) * m(1, 3) + m(3, 0) * m(0, 3) * m(1, 2);
  d[7] = m(0, 0) * m(1, 2) * m(2, 3) - m(0, 0) * m(1, 3) * m(2, 2) - m(1, 0) * m(0, 2) * m(2, 3) +
         m(1, 0) * m(0, 3) * m(2, 2) + m(2, 0) * m(0, 2) * m(1, 3) - m(2, 0) * m(0, 3) * m(1, 2);
  // d[8..11]: third row == cofactors of m's third column.
  d[8] = m(1, 0) * m(2, 1) * m(3, 3) - m(1, 0) * m(2, 3) * m(3, 1) - m(2, 0) * m(1, 1) * m(3, 3) +
         m(2, 0) * m(1, 3) * m(3, 1) + m(3, 0) * m(1, 1) * m(2, 3) - m(3, 0) * m(1, 3) * m(2, 1);
  d[9] = -m(0, 0) * m(2, 1) * m(3, 3) + m(0, 0) * m(2, 3) * m(3, 1) + m(2, 0) * m(0, 1) * m(3, 3) -
         m(2, 0) * m(0, 3) * m(3, 1) - m(3, 0) * m(0, 1) * m(2, 3) + m(3, 0) * m(0, 3) * m(2, 1);
  d[10] = m(0, 0) * m(1, 1) * m(3, 3) - m(0, 0) * m(1, 3) * m(3, 1) - m(1, 0) * m(0, 1) * m(3, 3) +
          m(1, 0) * m(0, 3) * m(3, 1) + m(3, 0) * m(0, 1) * m(1, 3) - m(3, 0) * m(0, 3) * m(1, 1);
  d[11] = -m(0, 0) * m(1, 1) * m(2, 3) + m(0, 0) * m(1, 3) * m(2, 1) + m(1, 0) * m(0, 1) * m(2, 3) -
          m(1, 0) * m(0, 3) * m(2, 1) - m(2, 0) * m(0, 1) * m(1, 3) + m(2, 0) * m(0, 3) * m(1, 1);
  // d[12..15]: fourth row == cofactors of m's fourth column.
  d[12] = -m(1, 0) * m(2, 1) * m(3, 2) + m(1, 0) * m(2, 2) * m(3, 1) + m(2, 0) * m(1, 1) * m(3, 2) -
          m(2, 0) * m(1, 2) * m(3, 1) - m(3, 0) * m(1, 1) * m(2, 2) + m(3, 0) * m(1, 2) * m(2, 1);
  d[13] = m(0, 0) * m(2, 1) * m(3, 2) - m(0, 0) * m(2, 2) * m(3, 1) - m(2, 0) * m(0, 1) * m(3, 2) +
          m(2, 0) * m(0, 2) * m(3, 1) + m(3, 0) * m(0, 1) * m(2, 2) - m(3, 0) * m(0, 2) * m(2, 1);
  d[14] = -m(0, 0) * m(1, 1) * m(3, 2) + m(0, 0) * m(1, 2) * m(3, 1) + m(1, 0) * m(0, 1) * m(3, 2) -
          m(1, 0) * m(0, 2) * m(3, 1) - m(3, 0) * m(0, 1) * m(1, 2) + m(3, 0) * m(0, 2) * m(1, 1);
  d[15] = m(0, 0) * m(1, 1) * m(2, 2) - m(0, 0) * m(1, 2) * m(2, 1) - m(1, 0) * m(0, 1) * m(2, 2) +
          m(1, 0) * m(0, 2) * m(2, 1) + m(2, 0) * m(0, 1) * m(1, 2) - m(2, 0) * m(0, 2) * m(1, 1);
  return vnl_matrix_fixed<T, 4, 4>(d);
}
139
140//: Calculates adjugate_fixed of a small vnl_matrix_fixed (not using svd)
141// This allows you to write e.g.
142//
143// x = vnl_adjugate_fixed(A) * b;
144//
145// Note that this function is inlined (except for the call to vnl_det()),
146// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
147// since that one is using svd.
148//
149// \relatesalso vnl_matrix
150
151template <class T>
152vnl_matrix<T>
153vnl_adjugate_asfixed(vnl_matrix<T> const & m)
154{
155 assert(m.rows() == m.columns());
156 assert(m.rows() <= 4);
157 if (m.rows() == 1)
158 return vnl_matrix<T>(1, 1, T(1) / m(0, 0));
159 else if (m.rows() == 2)
160 return vnl_adjugate(vnl_matrix_fixed<T, 2, 2>(m)).as_ref();
161 else if (m.rows() == 3)
162 return vnl_adjugate(vnl_matrix_fixed<T, 3, 3>(m)).as_ref();
163 else
164 return vnl_adjugate(vnl_matrix_fixed<T, 4, 4>(m)).as_ref();
165}
166
167//: Calculates transpose of the adjugate_fixed of a small vnl_matrix_fixed (not using svd)
168// This allows you to write e.g.
169//
170// x = vnl_cofactor(A) * b;
171//
172// Note that this function is inlined (except for the call to vnl_det()),
173// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
174// since that one is using svd. This is also faster than using
175//
176// x = vnl_adjugate_fixed(A).transpose() * b;
177//
178// \relatesalso vnl_matrix_fixed
179
180template <class T>
181vnl_matrix_fixed<T, 1, 1> vnl_cofactor(vnl_matrix_fixed<T, 1, 1> const & m)
182{
183 return vnl_matrix_fixed<T, 1, 1>(T(1) / m(0, 0));
184}
185
186//: Calculates transpose of the adjugate_fixed of a small vnl_matrix_fixed (not using svd)
187// This allows you to write e.g.
188//
189// x = vnl_cofactor(A) * b;
190//
191// Note that this function is inlined (except for the call to vnl_det()),
192// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
193// since that one is using svd. This is also faster than using
194//
195// x = vnl_adjugate_fixed(A).transpose() * b;
196//
197// \relatesalso vnl_matrix_fixed
198
199template <class T>
200vnl_matrix_fixed<T, 2, 2> vnl_cofactor(vnl_matrix_fixed<T, 2, 2> const & m)
201{
202
203 T d[4];
204 d[0] = m(1, 1);
205 d[2] = -m(0, 1);
206 d[3] = m(0, 0);
207 d[1] = -m(1, 0);
208 return vnl_matrix_fixed<T, 2, 2>(d);
209}
210
211//: Calculates transpose of the adjugate_fixed of a small vnl_matrix_fixed (not using svd)
212// This allows you to write e.g.
213//
214// x = vnl_cofactor(A) * b;
215//
216// Note that this function is inlined (except for the call to vnl_det()),
217// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
218// since that one is using svd. This is also faster than using
219//
220// x = vnl_adjugate_fixed(A).transpose() * b;
221//
222// \relatesalso vnl_matrix_fixed
223
224template <class T>
225vnl_matrix_fixed<T, 3, 3> vnl_cofactor(vnl_matrix_fixed<T, 3, 3> const & m)
226{
227
228 T d[9];
229 d[0] = (m(1, 1) * m(2, 2) - m(1, 2) * m(2, 1));
230 d[3] = (m(2, 1) * m(0, 2) - m(2, 2) * m(0, 1));
231 d[6] = (m(0, 1) * m(1, 2) - m(0, 2) * m(1, 1));
232 d[1] = (m(1, 2) * m(2, 0) - m(1, 0) * m(2, 2));
233 d[4] = (m(0, 0) * m(2, 2) - m(0, 2) * m(2, 0));
234 d[7] = (m(1, 0) * m(0, 2) - m(1, 2) * m(0, 0));
235 d[2] = (m(1, 0) * m(2, 1) - m(1, 1) * m(2, 0));
236 d[5] = (m(0, 1) * m(2, 0) - m(0, 0) * m(2, 1));
237 d[8] = (m(0, 0) * m(1, 1) - m(0, 1) * m(1, 0));
238 return vnl_matrix_fixed<T, 3, 3>(d);
239}
240
241//: Calculates transpose of the adjugate_fixed of a small vnl_matrix_fixed (not using svd)
242// This allows you to write e.g.
243//
244// x = vnl_cofactor(A) * b;
245//
246// Note that this function is inlined (except for the call to vnl_det()),
247// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
248// since that one is using svd. This is also faster than using
249//
250// x = vnl_adjugate_fixed(A).transpose() * b;
251//
252// \relatesalso vnl_matrix_fixed
253
template <class T>
vnl_matrix_fixed<T, 4, 4> vnl_cofactor(vnl_matrix_fixed<T, 4, 4> const & m)
{
  // Cofactor matrix of a 4x4 matrix: d holds the result in row-major order,
  // with d[4*i+j] the (i,j) cofactor of m (a signed 3x3 minor determinant
  // expanded into six products).  This is the transpose of vnl_adjugate(m):
  // the same expressions as there, but stored at transposed positions.
  T d[16];
  // d[0], d[4], d[8], d[12]: first column of the result == cofactors of m's first column.
  d[0] = m(1, 1) * m(2, 2) * m(3, 3) - m(1, 1) * m(2, 3) * m(3, 2) - m(2, 1) * m(1, 2) * m(3, 3) +
         m(2, 1) * m(1, 3) * m(3, 2) + m(3, 1) * m(1, 2) * m(2, 3) - m(3, 1) * m(1, 3) * m(2, 2);
  d[4] = -m(0, 1) * m(2, 2) * m(3, 3) + m(0, 1) * m(2, 3) * m(3, 2) + m(2, 1) * m(0, 2) * m(3, 3) -
         m(2, 1) * m(0, 3) * m(3, 2) - m(3, 1) * m(0, 2) * m(2, 3) + m(3, 1) * m(0, 3) * m(2, 2);
  d[8] = m(0, 1) * m(1, 2) * m(3, 3) - m(0, 1) * m(1, 3) * m(3, 2) - m(1, 1) * m(0, 2) * m(3, 3) +
         m(1, 1) * m(0, 3) * m(3, 2) + m(3, 1) * m(0, 2) * m(1, 3) - m(3, 1) * m(0, 3) * m(1, 2);
  d[12] = -m(0, 1) * m(1, 2) * m(2, 3) + m(0, 1) * m(1, 3) * m(2, 2) + m(1, 1) * m(0, 2) * m(2, 3) -
          m(1, 1) * m(0, 3) * m(2, 2) - m(2, 1) * m(0, 2) * m(1, 3) + m(2, 1) * m(0, 3) * m(1, 2);
  // d[1], d[5], d[9], d[13]: second column == cofactors of m's second column.
  d[1] = -m(1, 0) * m(2, 2) * m(3, 3) + m(1, 0) * m(2, 3) * m(3, 2) + m(2, 0) * m(1, 2) * m(3, 3) -
         m(2, 0) * m(1, 3) * m(3, 2) - m(3, 0) * m(1, 2) * m(2, 3) + m(3, 0) * m(1, 3) * m(2, 2);
  d[5] = m(0, 0) * m(2, 2) * m(3, 3) - m(0, 0) * m(2, 3) * m(3, 2) - m(2, 0) * m(0, 2) * m(3, 3) +
         m(2, 0) * m(0, 3) * m(3, 2) + m(3, 0) * m(0, 2) * m(2, 3) - m(3, 0) * m(0, 3) * m(2, 2);
  d[9] = -m(0, 0) * m(1, 2) * m(3, 3) + m(0, 0) * m(1, 3) * m(3, 2) + m(1, 0) * m(0, 2) * m(3, 3) -
         m(1, 0) * m(0, 3) * m(3, 2) - m(3, 0) * m(0, 2) * m(1, 3) + m(3, 0) * m(0, 3) * m(1, 2);
  d[13] = m(0, 0) * m(1, 2) * m(2, 3) - m(0, 0) * m(1, 3) * m(2, 2) - m(1, 0) * m(0, 2) * m(2, 3) +
          m(1, 0) * m(0, 3) * m(2, 2) + m(2, 0) * m(0, 2) * m(1, 3) - m(2, 0) * m(0, 3) * m(1, 2);
  // d[2], d[6], d[10], d[14]: third column == cofactors of m's third column.
  d[2] = m(1, 0) * m(2, 1) * m(3, 3) - m(1, 0) * m(2, 3) * m(3, 1) - m(2, 0) * m(1, 1) * m(3, 3) +
         m(2, 0) * m(1, 3) * m(3, 1) + m(3, 0) * m(1, 1) * m(2, 3) - m(3, 0) * m(1, 3) * m(2, 1);
  d[6] = -m(0, 0) * m(2, 1) * m(3, 3) + m(0, 0) * m(2, 3) * m(3, 1) + m(2, 0) * m(0, 1) * m(3, 3) -
         m(2, 0) * m(0, 3) * m(3, 1) - m(3, 0) * m(0, 1) * m(2, 3) + m(3, 0) * m(0, 3) * m(2, 1);
  d[10] = m(0, 0) * m(1, 1) * m(3, 3) - m(0, 0) * m(1, 3) * m(3, 1) - m(1, 0) * m(0, 1) * m(3, 3) +
          m(1, 0) * m(0, 3) * m(3, 1) + m(3, 0) * m(0, 1) * m(1, 3) - m(3, 0) * m(0, 3) * m(1, 1);
  d[14] = -m(0, 0) * m(1, 1) * m(2, 3) + m(0, 0) * m(1, 3) * m(2, 1) + m(1, 0) * m(0, 1) * m(2, 3) -
          m(1, 0) * m(0, 3) * m(2, 1) - m(2, 0) * m(0, 1) * m(1, 3) + m(2, 0) * m(0, 3) * m(1, 1);
  // d[3], d[7], d[11], d[15]: fourth column == cofactors of m's fourth column.
  d[3] = -m(1, 0) * m(2, 1) * m(3, 2) + m(1, 0) * m(2, 2) * m(3, 1) + m(2, 0) * m(1, 1) * m(3, 2) -
         m(2, 0) * m(1, 2) * m(3, 1) - m(3, 0) * m(1, 1) * m(2, 2) + m(3, 0) * m(1, 2) * m(2, 1);
  d[7] = m(0, 0) * m(2, 1) * m(3, 2) - m(0, 0) * m(2, 2) * m(3, 1) - m(2, 0) * m(0, 1) * m(3, 2) +
         m(2, 0) * m(0, 2) * m(3, 1) + m(3, 0) * m(0, 1) * m(2, 2) - m(3, 0) * m(0, 2) * m(2, 1);
  d[11] = -m(0, 0) * m(1, 1) * m(3, 2) + m(0, 0) * m(1, 2) * m(3, 1) + m(1, 0) * m(0, 1) * m(3, 2) -
          m(1, 0) * m(0, 2) * m(3, 1) - m(3, 0) * m(0, 1) * m(1, 2) + m(3, 0) * m(0, 2) * m(1, 1);
  d[15] = m(0, 0) * m(1, 1) * m(2, 2) - m(0, 0) * m(1, 2) * m(2, 1) - m(1, 0) * m(0, 1) * m(2, 2) +
          m(1, 0) * m(0, 2) * m(2, 1) + m(2, 0) * m(0, 1) * m(1, 2) - m(2, 0) * m(0, 2) * m(1, 1);
  return vnl_matrix_fixed<T, 4, 4>(d);
}
292
293//: Calculates transpose of the adjugate_fixed of a small vnl_matrix_fixed (not using svd)
294// This allows you to write e.g.
295//
296// x = vnl_cofactor(A) * b;
297//
298// Note that this function is inlined (except for the call to vnl_det()),
299// which makes it much faster than the vnl_matrix_adjugate_fixed class in vnl/algo
300// since that one is using svd. This is also faster than using
301//
302// x = vnl_adjugate_fixed(A).transpose() * b;
303//
304// \relatesalso vnl_matrix
305
306template <class T>
307vnl_matrix<T>
308vnl_cofactor(vnl_matrix<T> const & m)
309{
310 assert(m.rows() == m.columns());
311 assert(m.rows() <= 4);
312 if (m.rows() == 1)
313 return vnl_matrix<T>(1, 1, T(1) / m(0, 0));
314 else if (m.rows() == 2)
315 return vnl_cofactor(vnl_matrix_fixed<T, 2, 2>(m)).as_ref();
316 else if (m.rows() == 3)
317 return vnl_cofactor(vnl_matrix_fixed<T, 3, 3>(m)).as_ref();
318 else
319 return vnl_cofactor(vnl_matrix_fixed<T, 4, 4>(m)).as_ref();
320}
321
322
323template <class T>
324vnl_vector_fixed<T, 3> vnl_cofactor_row1(vnl_vector_fixed<T, 3> const & row2, vnl_vector_fixed<T, 3> const & row3)
325{
326 T d[3];
327 d[0] = (row2[1] * row3[2] - row2[2] * row3[1]);
328 d[1] = (row2[2] * row3[0] - row2[0] * row3[2]);
329 d[2] = (row2[0] * row3[1] - row2[1] * row3[0]);
330 return vnl_vector_fixed<T, 3>(d);
331}
332
333#endif // vnl_adjugate_fixed_h_
vnl_vector_fixed< T, 3 > vnl_cofactor_row1(vnl_vector_fixed< T, 3 > const &row2, vnl_vector_fixed< T, 3 > const &row3)
vnl_matrix< T > vnl_adjugate_asfixed(vnl_matrix< T > const &m)
vnl_matrix_fixed< T, 1, 1 > vnl_adjugate(vnl_matrix_fixed< T, 1, 1 > const &m)
vnl_matrix_fixed< T, 1, 1 > vnl_cofactor(vnl_matrix_fixed< T, 1, 1 > const &m)


Generated on 2024-07-17 for elastix by doxygen 1.11.0 (9b424b03c9833626cd435af22a444888fbbb192d) elastix logo