-
Notifications
You must be signed in to change notification settings - Fork 12
Expand file tree
/
Copy pathdifferential-topology.tex
More file actions
620 lines (474 loc) · 40.5 KB
/
differential-topology.tex
File metadata and controls
620 lines (474 loc) · 40.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
\documentclass[11pt,letterpaper,fleqn]{memoir}
\input{formatting} % Loads the book formatting
\usepackage{tikz}
% Adding pdf index and links within the document
% Custom formatting to remove default red boxes
\usepackage{hyperref}
\hypersetup{
colorlinks=true,
urlcolor=blue,
linkcolor=blue
}
\urlstyle{same}
% ---------------------------
\begin{document}
\tableofcontents*
\cleardoublepage
\mainmatter
\chapter{Infinitesimal reducibility}
\section{Differentiability}
The main idea is to just use the vector space structure of $\mathbb{R}^n$ to define a notion of differential, and then recover differentiability as maps that preserve that notion.
\subsection{Differential}
We define the differential as a sequence that converges to zero along a specific direction.
\begin{defn}[Convergence envelope]
A \textbf{convergence envelope} $\{a_i\}_{i=1}^{\infty}$ is a sequence of non-zero elements of $\mathbb{R}$ that converges to $0$.
\end{defn}
\iffalse
\begin{defn}[Convergence at the same rate]
Let $\{a_i\}_{i=1}^{\infty}$ and $\{b_i\}_{i=1}^{\infty}$ be two convergence envelopes. We say that they \textbf{converge at the same rate} if
$$ \lim\limits_{i \to \infty} \frac{a_i}{b_i} = 1.$$
\end{defn}
\begin{prop}
Convergence at the same rate is an equivalence relation.
\end{prop}
\begin{proof}
For reflexivity:
$$ \lim\limits_{i \to \infty} \frac{a_i}{a_i} = \lim\limits_{i \to \infty} 1 = 1.$$
For symmetry:
$$ \lim\limits_{i \to \infty} \frac{b_i}{a_i} = \lim\limits_{i \to \infty} \frac{1}{\frac{a_i}{b_i}} = \frac{ \lim\limits_{i \to \infty} 1}{\lim\limits_{i \to \infty} \frac{a_i}{b_i}} = \frac{1}{1} = 1.$$
For transitivity:
$$ \lim\limits_{i \to \infty} \frac{a_i}{c_i} = \lim\limits_{i \to \infty} \frac{a_i}{b_i}\frac{b_i}{c_i} = \lim\limits_{i \to \infty} \frac{a_i}{b_i} \lim\limits_{i \to \infty} \frac{b_i}{c_i} = 1 \cdot 1 = 1.$$
\end{proof}
\begin{defn}[Convergence class]
A \textbf{convergence class} is an equivalence class of sequences that converge at the same rate.
\end{defn}
\fi
\begin{defn}
Let $V$ be a real topological vector space. A \textbf{differential} $dv$ is a sequence of vectors $\{v_i\}_{i=1}^{\infty}$ such that there exists a vector $t \in V$ and a convergence envelope $\{a_i\}_{i=1}^{\infty}$ for which
$$ \lim\limits_{i \to \infty} \frac{v_i}{a_i} = t.$$
We call $t$ the \textbf{tangent vector} of the differential and $\{a_i\}_{i=1}^{\infty}$ its \textbf{convergence envelope}. We denote by $dv[a_i \, t]$ the differential together with its tangent vector and convergence envelope.
\end{defn}
\begin{prop}
Let $dv$ be a differential. It can be expressed as
$$ v_i = a_i t_i = a_i(t + w_i)$$
where $\{t_i\}_{i=1}^{\infty}$ is a sequence of vectors that converges to $t$ and $\{w_i\}_{i=1}^{\infty}$ is a sequence of vectors that converges to $0$.
\end{prop}
\begin{proof}
For the first expression, we can write $t_i = \frac{v_i}{a_i}$. Given the definition of differential, that sequence converges to $t$.
For the second expression, we can write $v_i = a_i t + a_i \left(t_i - t\right)$ for all $i$. We can set $w_i = t_i - t$ and write $v_i = a_i(t + w_i)$. We have:
$$\lim\limits_{i \to \infty} t_i = \lim\limits_{i \to \infty} t + \lim\limits_{i \to \infty} w_i $$
The sequence on the left converges to $t$, the first sequence on the right converges to $t$, therefore the second sequence on the right converges to zero.
\end{proof}
\begin{prop}
Let $V$ be a first-countable topological vector space. Every sequence $v_i \to 0$ is a differential with tangent vector $0$. That is, there exist sequences $a_i \to 0$ and $t_i \to 0$ such that $v_i = a_i t_i$.
\end{prop}
\begin{proof}
Since we are in a first-countable space, we have a countable basis of neighborhoods around the origin. Let $U_{m}$ be the $m^{\text{th}}$ basis neighborhood.
For $U_{m}$ there's going to exist a minimal $${}_{U_{m}}N_{n} \in \mathbb{N}\,\, s.t.\,\, \forall i > {}_{U_{m}}N_{n},\, v_{i} \in \frac{U_{m}}{n \cdot 2^n}$$ where $n, m \in \mathbb{N}$. Note that since $U_{m}$ is a neighborhood around 0, then, by continuity of multiplication, $\frac{U_{m}}{n \,\cdot\, 2^n}$ is also a neighborhood around 0.
Let $k_{1} = {}_{U_{1}}N_{1}$.
Let $k_{n} = \max(k_{n-1},\, n + 1,\, \max(\{{}_{U_{m}}N_{n} : m \leq k_{n-1}\})) + 1$. Note that $\max(k_{n-1}, \dots)$ and the increment at the end are there to guarantee that the sequence of $k_{n}$'s is strictly increasing.
Notice that ${}_{U_{1}}N_{1} = k_{1} < k_{2} < k_{3} < \dots$, which allows us to say that
$$\forall i \geq k_{1},\, \exists!\,n_{i}\,\,s.t.\,\, k_{n_{i}} \leq i < k_{n_{i}+1}$$
Now, define $t_{i}$ and $a_{i}$ in the following way:
$$
\forall i < k_{1}, t_{i} = v_{i} \land a_{i} = 1
$$
$$
\forall i \geq k_{1}, t_{i} = n_{i}v_{i} \land a_{i} = \frac{1}{n_{i}}
$$
(this $n_{i}$ is the unique $n_{i}$ we just described).
Since the $k_{n}$'s are strictly increasing we never ``run out'' of $k_{n}$'s (more importantly, of $n$'s), so we know $a_{i} \to 0$.
Let us now prove that $t_{i} \to 0$. Fix a basis neighborhood $U_{n}$.
We know $k_{n-1} \geq n$ (except for $n = 1$, but this doesn't matter, as what we really want is the result $k_{n} \geq {}_{U_{n}}N_{n}$ which is true for $n = 1$) by definition since $k_{n-1} = \max(\dots, n, \dots) + 1$. Similarly, we know that $k_{n} \geq {}_{U_{n}}N_{n}$ by definition.
Then, $\forall i > k_{n}$, $k_{n_{i}} \geq {}_{U_{n}}N_{n_{i}}$, thus, $v_{i} \in \frac{U_{n}}{n_{i} \cdot 2^{n_{i}}}$, so
$$
v_{i} \in \frac{U_{n}}{n_{i} \cdot 2^{n_{i}}}
$$
$$
n_{i}v_{i} \in \frac{U_{n}}{2^{n_{i}}}
$$
$$
t_{i} \in \frac{U_{n}}{2^{n_{i}}} \subseteq U_{n}
$$
So $t_{i} \in U_{n}$ for all $i > k_{n}$, which obeys the definition of convergence in topological spaces.
Finally, notice that $\forall i$, $v_{i} = a_{i}t_{i}$, which completes the proof.
\end{proof}
\begin{prop}
Differentials respect the following property:
$$ dv[a_i \, kt] = dv[k a_i \, t]$$
for any $k \in \mathbb{R}$. That is, a differential with tangent vector $kt$ and convergence envelope $\{a_i\}_{i=1}^{\infty}$ is also a differential with tangent vector $t$ and convergence envelope $\{ka_i\}_{i=1}^{\infty}$.
\end{prop}
\begin{proof}
We have $v_i = a_i (k t_i) = (k a_i) t_i$.
\end{proof}
\begin{prop}
Differentials with the same convergence envelope form a vector space. That is,
$$ b \, dv[a_i \, t] + c \, dv[a_i \, u] = dv[a_i \, (bt+cu)]$$
for any $t,u \in V$ and $b,c \in \mathbb{R}$.
\end{prop}
\begin{proof}
We have $b a_i t_i + c a_i u_i = a_i (b t_i + c u_i)$.
\end{proof}
\iffalse
\begin{prop}
Let $\{v_i\}_{i=1}^{\infty}$ be a directionally convergent sequence with tangent vector $v$ and rate of convergence $\{a_i\}_{i=1}^{\infty}$. Let $\{b_i\}_{i=1}^{\infty}$ be a convergence envelope that converges at the same rate of $\{a_i\}_{i=1}^{\infty}$. Then $\{v_i\}_{i=1}^{\infty}$ is also a directionally convergent sequence with tangent vector $v$ and rate of convergence $\{b_i\}_{i=1}^{\infty}$.
\end{prop}
\begin{proof}
Note that
$$ \lim\limits_{i \to \infty} \frac{v_i}{b_i} = \lim\limits_{i \to \infty} \frac{v_i}{a_i}\frac{a_i}{b_i} = \lim\limits_{i \to \infty} \frac{v_i}{a_i} \lim\limits_{i \to \infty} \frac{a_i}{b_i} = \lim\limits_{i \to \infty} \frac{v_i}{a_i}.$$
Therefore all convergence envelopes that converge at the same rate will yield the same results.
\end{proof}
\fi
\subsection{Differentiability}
We define a map to be differentiable if it maps differentials to differentials with the same convergence envelope.
\begin{defn}
Let $V$ and $W$ be two vector spaces. Given a map $f: V \to W$, a sequence $\{v_i\}_{i=1}^{\infty}$ that converges to some $v \in V$ and a differential $dv[a_i \, t]$, we define the \textbf{image of the differential through the map} as $df(v_i, dv[a_i \, t]) = \{f(v_i + a_i t_i) - f(v_i)\}_{i=1}^{\infty}$. The map is \textbf{differentiable} at $v$ if there exists a map $\left.\frac{df}{dV} \right|_{v}: V \to W$, called the \textbf{derivative}, such that $df(v_i, dv[a_i \, t]) = dw[a_i \, \left.\frac{df}{dV} \right|_{v} (t)]$ for all $\{v_i\}_{i=1}^{\infty}$ and for all differentials. That is, $df$ maps differentials of $V$ to differentials of $W$ that have the same convergence envelope and a tangent vector that depends only on the original tangent vector.
\end{defn}
\begin{prop}
A map $f : V \to W$ is differentiable at $v$ if and only if the limit
$$ \lim\limits_{i \to \infty} \frac{f(v_i + a_i t_i) - f(v_i)}{a_i} = \left.\frac{df}{dV} \right|_{v} (t)$$
exists for all sequences $\{v_i\}_{i=1}^{\infty}$ that converge to $v$, $\{a_i\}_{i=1}^{\infty}$ that converge to $0$ with $a_i \neq 0$ for all $i$ and $\{t_i\}_{i=1}^{\infty}$ that converge to some $t$.
\end{prop}
\begin{proof}
The expression is simply applying the definition of differential to $df$.
\end{proof}
\begin{remark}
This definition of differentiability implies Hadamard differentiability and G\^ateaux differentiability. This means that, for normed vector spaces, it also implies Fr\'echet differentiability. Unlike Hadamard and G\^ateaux differentiability, it implies linearity, which therefore is not an extra condition.
\end{remark}
\begin{prop}
The derivative must be a linear function.
\end{prop}
\begin{proof}
Recall that $dv[a_i \, kt] = dv[k a_i \, t]$, therefore $dw[a_i \, \left.\frac{df}{dV} \right|_{v} (kt)] = df(v_i, dv[a_i \, kt]) = df(v_i, dv[ka_i \, t]) = dw[ka_i \, \left.\frac{df}{dV} \right|_{v} (t)] = dw[a_i \, k \left.\frac{df}{dV} \right|_{v} (t)]$. Therefore $\left.\frac{df}{dV} \right|_{v} (kt) = k \left.\frac{df}{dV} \right|_{v} (t)$.
We also have
\begin{align*}
dw[a_i \, \left.\frac{df}{dV} \right|_{v} (t+u)] &= df(v_i, dv[a_i \, t + u]) = \{f(v_i + a_i (t_i + u_i)) - f(v_i)\}_{i=1}^{\infty} \\
&= \{f(v_i + a_i (t_i + u_i)) - f(v_i + a_i t_i) + f(v_i + a_i t_i) - f(v_i)\}_{i=1}^{\infty} \\
&= \{f((v_i + a_i t_i) + a_i u_i) - f(v_i + a_i t_i)\}_{i=1}^{\infty} + \{f(v_i + a_i t_i) - f(v_i)\}_{i=1}^{\infty} \\
&= df(v_i + a_i t_i, dv[a_i \, u]) + df(v_i, dv[a_i \, t]) \\
&= dw[a_i \, \left.\frac{df}{dV} \right|_{v} (u)]+dw[a_i \, \left.\frac{df}{dV} \right|_{v} (t)] \\
&= dw[a_i \, \left.\frac{df}{dV} \right|_{v} (u) + \left.\frac{df}{dV} \right|_{v} (t)]
\end{align*}
\textbf{Alternative proof.} Since, if the derivative exists, it depends only on the tangent vector, it is sufficient to prove that the derivative is linear for particular sequences. It will then have to depend linearly on all sequences with the same tangent vector.
First we show that the derivative for the zero vector must be zero. Since the sequence of vectors $\{0\}_{i=1}^{\infty}$ is a sequence convergent to $0$, $\{a_i 0\}_{i=1}^{\infty}$ is a differential with $0$ as tangent vector. We have
\begin{align*}
\left.\frac{df}{dV} \right|_{v} (0) &= \lim\limits_{i \to \infty} \frac{f(v_i + a_i 0) - f(v_i)}{a_i}
= \lim\limits_{i \to \infty} \frac{f(v_i ) - f(v_i)}{a_i} = \lim\limits_{i \to \infty} 0 = 0
\end{align*}
Let $\{a_i t_i\}_{i=1}^{\infty}$ and $\{a_i u_i\}_{i=1}^{\infty}$ be two differentials such that $t_i \to t$ and $u_i \to u$, and $b,c \in \mathbb{R}$. Suppose $b$ is zero. Then
\begin{align*}
\left.\frac{df}{dV} \right|_{v} (b t + c u) &= \left.\frac{df}{dV} \right|_{v} (0 + c u) = \left.\frac{df}{dV} \right|_{v} (c u) = 0 + \left.\frac{df}{dV} \right|_{v} (c u) = \left.\frac{df}{dV} \right|_{v} (0) + \left.\frac{df}{dV} \right|_{v} (c u) \\
&= \left.\frac{df}{dV} \right|_{v} (b t) + \left.\frac{df}{dV} \right|_{v} (c u)
\end{align*}
If $b$ and $c$ are different from zero, we have
\begin{align*}
\left.\frac{df}{dV} \right|_{v} (b t + c u) &= \lim\limits_{i \to \infty} \frac{f(v_i + a_i (b t_i + c u_i)) - f(v_i)}{a_i} \\
&= \lim\limits_{i \to \infty} \frac{f(v_i + c a_i u_i + b a_i t_i) - f(v_i + c a_i u_i) + f(v_i + c a_i u_i) - f(v_i)}{a_i} \\
&= \lim\limits_{i \to \infty} \frac{f((v_i + c a_i u_i) + b a_i t_i) - f(v_i + c a_i u_i)}{a_i}
+ \lim\limits_{i \to \infty} \frac{ f(v_i + c a_i u_i) - f(v_i)}{a_i} \\
&= \lim\limits_{i \to \infty} \frac{f((v_i + c a_i u_i) + b a_i t_i) - f(v_i + c a_i u_i)}{b a_i} b
+ \lim\limits_{i \to \infty} \frac{ f(v_i + c a_i u_i) - f(v_i)}{c a_i} c \\
&= b \left.\frac{df}{dV} \right|_{v} (t) + c \left.\frac{df}{dV} \right|_{v} (u)
\end{align*}
\end{proof}
\begin{coro}
If $f : V \to W$ is differentiable, then $f$ is continuous.
\end{coro}
\begin{proof}
Let $v \in V$ be a vector and $v_i$ a sequence of vectors that go to zero. Then $v_i$ is a differential and can be written as $a_i t_i$ where $a_i \to 0$ and $t_i \to 0$.
We have:
\begin{equation}
\begin{aligned}
\lim_{ i \to \infty } f(v_i) - f(v) &= \lim_{ i \to \infty } f(v + a_{i}t_{i}) - f(v) = \lim_{ i \to \infty } a_{i} \frac{f(v + a_{i}t_{i}) - f(v)}{a_{i}} \\
&= \lim_{ i \to \infty } a_{i} \lim_{ i \to \infty } \frac{f(v + a_{i}t_{i}) - f(v)}{a_{i}} = 0 \cdot \left.\frac{df}{dV}\right|_{v}(0) = 0 \cdot 0 \\
&= 0.
\end{aligned}
\end{equation}
\end{proof}
\begin{conj}
If $f : V \to W$ is differentiable, then the derivative $\frac{df}{dV}$ is continuous.
\end{conj}
\begin{proof}
Since the derivative is linear, we only need to prove that it is continuous at one point, for example $0$. That is, we need to show that for every $t_i \to 0$, $\frac{df}{dV}(t_i) \to 0$.
\end{proof}
\begin{remark}
Let us calculate the derivative of $x^2$ at a generic point $x$ with a generic differential $dx = \{a_i t_i\}$.
$$ \lim\limits_{i \to \infty} \frac{(x_i + a_i t_i)^2 - x_i^2}{a_i} = \lim\limits_{i \to \infty} \frac{x_i^2 + 2 x_i a_i t_i + a_i^2t_i^2 - x_i^2}{a_i} = \lim\limits_{i \to \infty} \frac{2 x_i a_i t_i + a_i^2t_i^2}{a_i}$$
$$= \lim\limits_{i \to \infty} 2 x_i t_i + a_i t_i^2=2xt$$
This shows that higher orders here are simply higher powers of $a_i$.
\end{remark}
\begin{remark}
Consider a sequence $v_i$. Suppose it converges to $v$. We can form the sequence $v_i - v$, and suppose it is a differential. Then we can find $a_i$ such that $\frac{v_i - v}{a_i} = t_i$ converges to $t$. We can then form $t_i - t$. Suppose this is also a differential with the same convergence envelope. Then $\frac{t_i - t}{a_i} = c_i$ converges to some $c$.
Putting all together, we can write $v_i = a_i^0 v + a_i^1 t + a_i^2 c + a_i^3 w_i$ where $w_i = c_i - c$. $v$, $t$ and $c$ represent respectively the zeroth, first and second order term of the sequence. $w_i$ is the higher order.
Suppose the index $i$ does not span to infinity, but ends at some finite $n$. Then the $n$ sequences $a_i^j$ with $0 \leq j < n$ are linearly independent. Then we can write the finite sequence $v_i$ as $v_i = \sum c_j a_i^j$. If we take the limit as $n$ goes to infinity, this does not work: all the sequences $\{a_i^j\}_{i=0}^\infty$ converge for any $j$, therefore we can only write sequences that converge. An infinitely smooth sequence, then, is one that can be expressed with the infinite sum. An infinitely smooth function retains, in a sense, the property that it can be decomposed like a finite sequence.
\end{remark}
\begin{prop}[Chain rule]
Let $U$, $V$ and $W$ be three vector spaces. Let $f : U \to V$ and $g : V \to W$ be two differentiable maps and $h = g \circ f$ their composition. Then $h$ is differentiable and $\frac{dh}{dU} = \frac{dg}{dV} \circ \frac{df}{dU}$.
\end{prop}
\begin{proof}
Since $f$ is differentiable, we have $df(u_i, du[a_i \, t]) = dv[a_i \left.\frac{df}{dU}\right|_{u}(t) ]$ for all $u_i \to u$, convergence envelopes $a_i$ and tangent vectors $t$. Since $g$ is differentiable, we have $dg(v_i, dv[a_i \, t]) = dw[a_i \left.\frac{dg}{dV}\right|_{v}(t) ]$ for all $v_i \to v$, convergence envelopes $a_i$ and tangent vectors $t$. In particular, we have
\begin{equation}
\begin{aligned}
dw[a_i \left.\frac{dg}{dV}\right|_{v}\left(\left.\frac{df}{dU}\right|_{u}(t)\right) ] &= dg(f(u_i), dv[a_i \left.\frac{df}{dU}\right|_{u}(t) ]) \\
&= dg(f(u_i), df(u_i, du[a_i \, t])) \\
&= dg(f(u_i), df(u_i, \{a_i t_i\})) \\
&=dg(f(u_i), \{ f(u_i + a_i t_i) - f(u_i) \})) \\
&=\{g(f(u_i) + f(u_i + a_i t_i) - f(u_i))- g(f(u_i))\} \\
&=\{g(f(u_i + a_i t_i))- g(f(u_i))\} \\
&=\{h(u_i + a_i t_i)- h(u_i)\} \\
&=dh(u_i, du[a_i \, t]).
\end{aligned}
\end{equation}
Therefore the image of the differential through $h$ is a differential with convergence envelope $a_i$ and tangent vector $\left.\frac{dg}{dV}\right|_{v}\left(\left.\frac{df}{dU}\right|_{u}(t)\right)$. The derivative of $h$ is
\begin{equation}
\frac{dh}{dU} = \frac{dg}{dV} \circ \frac{df}{dU}
\end{equation}
\textbf{Alternative proof.} TODO: Clenup. Maybe work directly with alternative definition? Let $u \in U$ and $du[a_{i}\, t]$ and $du[b_{i}\,t]$ be differentials.
Let $\left.\frac{df}{dU}\right|_{u}^{i}\,(t) = \frac{f(v_{i}\,+\,a_{i} t_{i})\,-\,f(v_{i})}{a_{i}}$ be a sequence that converges to $\left.\frac{df}{dU}\right|_{u}(t)$. Then...
$$
\left.\frac{dg}{dV}\right|_{f(v)} \circ \left.\frac{df}{dU}\right|_{v} (t) = \lim_{ i \to \infty } \frac{g\left( f(v_{i}) + b_{i}\left.\frac{df}{dU}\right|_{u}^{i}(t) \right) - g(f(v_{i}))}{b_{i}}
$$
$$
= \lim_{i \to \infty} \frac{g(f(v_{i}) + b_{i}\frac{f(v_{i} + a_{i}t_{i}) - f(v_{i})}{a_{i}}) - g(f(v_{i}))}{b_{i}}
$$
$$
= \lim_{i \to \infty} \frac{g(\frac{a_{i}f(v_{i})\,+\,b_{i}f(v_{i}+a_{i}t_{i}) - b_{i}f(v_{i})}{a_{i}}) - g(f(v_{i}))}{b_{i}}
$$
$$
= \lim_{i \to \infty} \frac{g(\frac{(a_{i}\,-\,b_{i})f(v_{i})\,+\,b_{i}f(v_{i}\,+\,a_{i}t_{i})}{a_{i}}) - g(f(v_{i}))}{b_{i}}
$$
Although $b_{i}$ is a generic convergence envelope, notice how we can use a specific value for this convergence envelope (as the definition of derivative requires the limit to be the same for all convergence envelopes) so let $b_{i} = a_{i}$:
$$
= \lim_{i \to \infty} \frac{g(\frac{(a_{i}\,-\,a_{i})f(v_{i})\,+\,a_{i}f(v_{i}\,+\,a_{i}t_{i})}{a_{i}}) - g(f(v_{i}))}{a_{i}}
$$
$$
= \lim_{i \to \infty} \frac{g(\frac{a_{i}f(v_{i}\,+\,a_{i}t_{i})}{a_{i}}) - g(f(v_{i}))}{a_{i}}
$$
$$
= \lim_{i \to \infty} \frac{g(f(v_{i}\,+\,a_{i}t_{i})) - g(f(v_{i}))}{a_{i}}
$$
$$
= \lim_{i \to \infty} \frac{h(v_{i}\,+\,a_{i}t_{i}) - h(v_{i})}{a_{i}}
$$
% Given that $f$ and $g$ are differentiable, a differential of $U$ will be mapped to a differential of $V$ which will be mapped to a differential of $W$. The composite map $h$ will map a differential of $U$ to a differential of $W$ and therefore it is differentiable and the derivative exists. The differential returned by $h$ will be the one returned by $g$ and $f$ combined, therefore the derivative of $h$ is the function combination of the derivatives of $g$ and $f$.
\end{proof}
\subsubsection{Relation to other derivatives}
\begin{prop}
Let $f:\mathbb{R} \to \mathbb{R}$. Then the standard analytical notions of differentiability and derivative coincide.
\end{prop}
\begin{proof}
The standard notion of differentiability at $v \in \mathbb{R}$ requires the limit
$$ \lim\limits_{h \to 0} \frac{f(v + h) - f(v)}{h} $$
to exist. This is equivalent to requiring that the limit
$$ \lim\limits_{i \to \infty} \frac{f(v + h_i) - f(v)}{h_i} $$
exists for all sequences $\{h_i\}_{i=1}^{\infty}$ that converge to zero with $h_i \neq 0$ for all $i$.
Our notion of differentiability at $v \in \mathbb{R}$ requires the limit
$$ \lim\limits_{i \to \infty} \frac{f(v_i + a_i t_i) - f(v_i)}{a_i} = \left.\frac{df}{dV} \right|_{v} (t)$$
to exist for all sequences $\{v_i\}_{i=1}^{\infty}$ that converge to $v$, $\{a_i\}_{i=1}^{\infty}$ that converge to $0$ with $a_i \neq 0$ for all $i$, and $\{t_i\}_{i=1}^{\infty}$ that converges to some $t$.
Suppose $f$ is differentiable in the new sense. Choosing $t_i = 1$ and $v_i = v$ for all $i$, we have
$$ \lim\limits_{i \to \infty} \frac{f(v + a_i) - f(v)}{a_i} = \left.\frac{df}{dV} \right|_{v} (1)$$
for all $\{a_i\}_{i=1}^{\infty}$ that converge to $0$ with $a_i \neq 0$ for all $i$. Therefore $f$ is differentiable in the standard sense.
Suppose $f$ is differentiable in the standard sense. Assuming, for now, that all sequences that appear in the denominator are never zero, we have
\begin{align*}
\lim\limits_{i \to \infty} \frac{f(v_i + a_i t_i) - f(v_i)}{a_i} &= \lim\limits_{i \to \infty} \frac{f(v + (v_i - v) + a_i t_i) - f(v + (v_i - v))}{a_i} \\
&= \lim\limits_{i \to \infty} \frac{f(v + (v_i - v) + a_i t_i) - f(v) + f(v) - f(v + (v_i - v))}{a_i} \\
&= \lim\limits_{i \to \infty} \left[\frac{f(v + (v_i - v) + a_i t_i) - f(v)}{a_i}
- \frac{f(v + (v_i - v)) - f(v)}{a_i} \right]\\
&= \lim\limits_{i \to \infty} \left[ \frac{f(v + (v_i - v) + a_i t_i) - f(v)}{(v_i - v) + a_i t_i} \frac{(v_i - v) + a_i t_i}{a_i} \right. \\
&- \left.\frac{f(v + (v_i - v)) - f(v)}{(v_i - v)} \frac{(v_i - v)}{a_i} \right] \\
&= \left.\frac{df}{dv} \right|_{v} \lim\limits_{i \to \infty} \left[ \frac{(v_i - v) + a_i t_i}{a_i} - \frac{(v_i - v)}{a_i}\right] = \left.\frac{df}{dv} \right|_{v} \lim\limits_{i \to \infty} t_i = \left.\frac{df}{dv} \right|_{v} t \\
\end{align*}
Note that the left side of each product in the fourth line matches the standard definition of the derivative. In the one-dimensional case, each of these is a number and it factors out of the limit.
\end{proof}
\begin{conj}
Let $V$ and $W$ be two normed vector spaces. Then the notion of Fr\'{e}chet derivative and the new derivative coincide.
\end{conj}
\begin{proof}
Recall the Fr\'{e}chet derivative of a function $f: V \to W$ exists at $x \in V$ if there is a linear function $\lambda: V \to W$ such that
\begin{equation}
\lim_{\lVert h \rVert_{V} \to 0} \frac{\lVert f(x + h) - f(x) - \lambda(h) \rVert_{W}}{\lVert h \rVert_{V}} = 0
\end{equation}
First, assume $f$ is differentiable in the Carcassi sense. Therefore, for every $v_i \to v$, $a_i \to 0$ and $t_i \to t$ we have
\begin{equation}
\begin{aligned}
&\lim_{i \to \infty} \frac{f(v_{i} + a_{i}t_{i}) - f(v_{i})}{a_{i}} = \left.\frac{df}{dV}\right|_{v}(t) \\
&\left( \lim_{i \to \infty} \frac{f(v_{i} + a_{i}t_{i}) - f(v_{i})}{a_{i}} \right) - \left.\frac{df}{dV}\right|_{v}(t) = \vec{0}_{W} \\
&\lim_{i \to \infty} \frac{f(v_{i} + a_{i}t_{i}) - f(v_{i})}{a_{i}} - \lim_{i \to \infty} \left.\frac{df}{dV}\right|_{v}(t_i) = \vec{0}_{W} \\
&\lim_{i \to \infty} \frac{f(v_{i} + a_{i}t_{i}) - f(v_{i}) - a_{i}\left.\frac{df}{dV}\right|_{v}(t_i)}{a_{i}} = \vec{0}_{W}
\end{aligned}
\end{equation}
since $\frac{df}{dV}$ is continuous. (TODO)
\begin{equation}
\begin{aligned}
&\frac{\lim_{i \to \infty} \frac{f(v_{i} + a_{i}t_{i}) - f(v_{i}) - a_{i}\left.\frac{df}{dV}\right|_{v}(t_i)}{a_{i}}}{\lVert t_i \rVert_{V}} = \vec{0}_{W}
\end{aligned}
\end{equation}
TODO
\end{proof}
\subsubsection{Partial derivatives}
TODO: still old version
Partial derivatives are recovered studying how differentials behave under vector space composition. If $f : V \to W$ is differentiable and $V = V_1 \times V_2$, we can ask for the map from a differential of $V_1$ to a differential of $W$ through $f$. This is exactly a variation of $V$ that keeps $V_2$ constant. Conceptually, given that $dv = (dv_1, dv_2)$ and the derivative is linear, we have $dw = \frac{df}{dV} dv = \frac{df}{dV} (dv_1, dv_2) = \frac{df}{dV} (dv_1, 0) + \frac{df}{dV} (0, dv_2) = \frac{df}{dV_1} dv_1 + \frac{df}{dV_2} dv_2$.
Suppose that $V = V_1 \times V_2$, then $dV = dV_1 \times dV_2$. This means that we can decompose a differential $dv= dv_1 + dv_2$ as the sum of two differentials in the respective spaces (mapped into the composite through the injection map). Suppose that $W = W_1 \times W_2$ and $f : V \to W$ is differentiable. Then we also have $dw= dw_1 + dw_2$ and we can study how $f$ maps differentials of $V_1$ and $V_2$ to differentials of $W_1$ and $W_2$. We will find $dw = dw_1 + dw_2 = \frac{dw_1}{dV} dv + \frac{dw_2}{dV} dv = \frac{\partial w_1}{\partial V_1} dv_1 + \frac{\partial w_1}{\partial V_2} dv_2 + \frac{\partial w_2}{\partial V_1} dv_1 + \frac{\partial w_2}{\partial V_2} dv_2$. If $V = \mathbb{R}^n$ and $W = \mathbb{R}^m$ we find the usual definitions.
\begin{prop}[Direct product and direct sum]
The differential space $dV$ of the direct product $V=\prod V_i$ of a family of vector spaces $V_i$ is the direct product $\prod dV_i$ of the respective differential spaces. The same is true for the direct sum.
\end{prop}
\begin{proof}
Let $v \in V$ and consider its halving sequence $\{(1/2)^i v\}_{i=1}^{\infty}$. Given that $V$ is a direct product, $v=(v_j)$ we have $\{(1/2)^i (v_j)\}_{i=1}^{\infty} = \{((1/2)^i v_j)\}_{i=1}^{\infty}$. Conversely, if we pick one vector for each space $V_j$ and construct their respective halving sequences, this becomes a halving sequence of a vector $v \in V$. Therefore the space of halving sequences over $V$ is the direct product of the halving sequences over the respective $V_j$. Given that the spaces of halving sequences are isomorphic to the respective differential spaces, the differential space of the direct product is the direct product of the differential spaces.
The same argument works for direct sum.
\end{proof}
\begin{defn}
Let $V$ and $W$ be two vector spaces and let $V=\prod V_i$ be the direct product of a family of vector spaces $V_i$. Let $f: V \to W$ be a differentiable map. The \textbf{partial derivative} of $f$ with respect to $V_i$, noted $\frac{\partial f}{\partial V_i}$, is the map from a differential of $V_i$ to the differential of $W$ through $f$. That is, $\frac{\partial f}{\partial V_i} (dv_i) = \frac{df}{dV} \left( (0, ..., 0, dv_i, 0, ...) \right)$.
\end{defn}
\begin{remark}
Partial derivatives should be recoverable through vector space composition. Suppose that $V = V_1 \times V_2$, then $dV = dV_1 \times dV_2$. This means that we can decompose a differential $dv= dv_1 + dv_2$ as the sum of two differentials in the respective spaces (mapped into the composite through the injection map). Suppose that $W = W_1 \times W_2$ and $f : V \to W$ is differentiable. Then we also have $dw= dw_1 + dw_2$ and we can study how $f$ maps differentials of $V_1$ and $V_2$ to differentials of $W_1$ and $W_2$. We will find $dw = dw_1 + dw_2 = \frac{dw_1}{dV} dv + \frac{dw_2}{dV} dv = \frac{\partial w_1}{\partial V_1} dv_1 + \frac{\partial w_1}{\partial V_2} dv_2 + \frac{\partial w_2}{\partial V_1} dv_1 + \frac{\partial w_2}{\partial V_2} dv_2$. If $V = \mathbb{R}^n$ and $W = \mathbb{R}^m$ we find the usual definitions.
\end{remark}
\begin{remark}
TODO: we could compare and contrast with other notions of generalized derivatives: \url{https://en.wikipedia.org/wiki/Fr%C3%A9chet_derivative} \url{https://en.wikipedia.org/wiki/Gateaux_derivative} \url{https://en.wikipedia.org/wiki/Hadamard_derivative}
\end{remark}
\section{Multivectors}
Blurb: vector spaces define only the notion of parallelism and ratio between two vectors that are parallel. With just this notion we can define $k$-vectors to define, instead of linear direction, planar direction and so on.
A vector space defines a notion of direction and ratios between displacements along the same direction. On top of this, we can define a notion of planar direction, a $2$-vector, and similarly a $k$-dimensional directional, a $k$-vector.
\begin{defn}
TODO: find a nice definition for the exterior algebra of vectors.
$\wedge : V^n \times V^m \to V^{n+m}$ such that:
\begin{description}
\item[Bilinear] $a \wedge(b + c) = a \wedge b + a \wedge c$ and $(b + c) \wedge a= b \wedge a + c \wedge a$
\item[Associative] $a \wedge (b \wedge c) = (a \wedge b) \wedge c$
\item[Antisymmetric] $a \wedge a = 0$
\end{description}
\end{defn}
\section{Linear functionals (on vector spaces)}
\begin{defn}
Let $V$ be a vector space. A $k$-differential is a differential of $k$-vectors.
\end{defn}
\begin{defn}
Let $V$ be a vector space. A differentiable $k$-surface of $V$ is a region $\sigma^k$ such that given $k+1$ sequences $\{ \{ {v}^j_i \}_{i=1}^{\infty}\}_{j=0}^{k}$ such that ${v}^j_i \in \sigma^k$ for all $i$ and $j$, and that ${v}^j_i \to v$ for all $j$, the sequence $\{ ({v}^1_i - {v}^0_i) \wedge ({v}^2_i - {v}^0_i) \wedge ... \wedge ({v}^k_i - {v}^0_i) \}_{i=1}^{\infty}$ is a $k$-differential. Moreover, there exists one $k$-differential so constructed that has a non-zero tangent $k$-vector and all other $k$-differentials have either the same tangent $k$-vector or the zero $k$-vector.
\end{defn}
\begin{remark}
The idea is that this is enough to fix the dimensionality of the surface. In fact, if a neighborhood is lower dimensional, all tangent $k$-vectors must be zero; if a neighborhood is higher dimensional, there will be two tangent $k$-vectors that are different.
\end{remark}
\begin{defn}
A $k$-functional is a map $f_k : S^k \to \mathbb{R}$ such that, given a family $\{ \sigma^k_i\}_{i=1}^{\infty}$ for which $\sigma^k_i \cap \sigma^k_j = \emptyset$ for all $i \neq j$, $f_k(\bigcup \sigma^k_i) = \sum f_k(\sigma^k_i)$.
\end{defn}
\begin{defn}
A $k$-functional $f_k : S^k \to \mathbb{R}$ is differentiable at $v$ if for every sequence $\{v_i\}_{i=1}^{\infty}$ that converges to $v$ and for every differential $\{a_i {t}^1_i \wedge {t}^2_i \wedge ... \wedge {t}^k_i \}_{i=1}^{\infty}$, the sequence $\{f_k(v_i, a_i {t}^1_i \wedge {t}^2_i \wedge ... \wedge {t}^k_i) \}_{i=1}^{\infty}$ is a differential with envelope $a_i$.
\end{defn}
Notes: similar ``derivatives'' applied to different objects.
\begin{itemize}
\item Standard derivative: $f : X \to \mathbb{R} - (\{v_i\}, \{a_i t_i\}) \to \{f(v_i +a_i t_i) - f(v_i)\} = a_i \left. \frac{df}{dx} \right|_{v}(t)$
\item Line functional: $w : \Lambda \to \mathbb{R} - (\{v_i\}, \{a_i t_i\}) \to \{w(l(v_i, a_i t_i))\} = a_i \left. \frac{dw}{d\lambda} \right|_{v}(t)$
\end{itemize}
\section{Linear functionals (on manifolds)}
Review notation. Let $M$ be a differentiable manifold of dimension $n$.
\begin{defn}
A $k$-surface is a $k$-dimensional smooth submanifold of $M$. We denote by $S^k$ the set of all $k$-surfaces and by $S = \bigcup_{k=0}^n S^k$ the set of all smooth surfaces of all dimensions.
\end{defn}
\begin{defn}
Given a $k$-surface $\sigma^k \in S^k$, the \textbf{boundary} of $\sigma^k$, denoted by $\partial\sigma^k \in S^{k-1}$ is the limit of varied coordinates. The \textbf{boundary operator} $\partial : S \to S$ is a map from a $k$-surface to its boundary. A surface is \textbf{closed} if it has no boundary.
\end{defn}
\begin{coro}
Boundaries of smooth surfaces are smooth surfaces. Boundaries do not have boundaries. That is, $\partial\partial \sigma^k = \emptyset$ for all $\sigma^k \in S^k$.
\end{coro}
\begin{defn}
A \textbf{$k$-functional} is a linear function of $k$-surfaces. That is, it is a function $f_k : S^k \to \mathbb{R}$ with the following properties:
\begin{description}
\item[Linear] $f_k(\sigma^k_1 \cup \sigma^k_2) = f_k(\sigma^k_1) + f_k(\sigma^k_2)$ for every $\sigma^k_1, \sigma^k_2 \in S^k$ such that $\sigma^k_1 \cap \sigma^k_2 = \emptyset$
\item[No contribution from boundary] $f_k(\sigma^k) = f_k(\sigma^k \setminus \partial \sigma^k)$ for every $\sigma^k \in S^k$
\item[Commutes with the limit] $\lim\limits_{i \to \infty} f_k(\sigma_i^k) = f_k(\lim\limits_{i \to \infty}\sigma_i^k)$
\end{description}
We denote by $F_k$ the set of all $k$-functionals and by $F = \bigcup_{k=0}^n F_k$ the set of all functionals.
\end{defn}
\begin{coro}
Any $k$-functional applied to the empty set returns zero. That is, for any $f_k \in F$, $f_k(\emptyset) = 0$.
\end{coro}
\begin{defn}
The \textbf{zero $k$-functional}, noted $0_k \in F_k$, is the $k$-functional that always returns zero. That is, $0_k(\sigma^k) = 0$ for all $\sigma^k \in S^k$.
\end{defn}
\begin{defn}
Given a $k$-functional $f_k \in F_k$, the \textbf{boundary functional} $\partial f_k \in F_{k+1}$ is a $(k+1)$-functional that applies $f_k$ on the boundary. That is, $\partial f_k(\sigma^{k+1}) = f_k(\partial \sigma^{k+1})$.
\end{defn}
\begin{coro}
The boundary functional of the boundary functional is the zero functional. That is, for any $k$-functional $f_k \in F$, $\partial \partial f_k = 0_{k+2}$.
\end{coro}
\begin{proof}
$\partial \partial f_k (\sigma ^{k+2}) = \partial f_k (\partial \sigma ^{k+2}) = f_k (\partial \partial \sigma ^{k+2}) = f_k(\emptyset) = 0$
\end{proof}
\begin{defn}
A $k$-surface $\sigma^k \in S^k$ is \textbf{contractible} if it can be continuously shrunk to a point. That is, the inclusion map $\iota : \sigma^k \to M$ is null-homotopic.
\end{defn}
\begin{defn}
An \textbf{exact functional} is a $k$-functional that returns zero on all closed $k$-surfaces. That is, $f_k(\sigma^k) = 0$ for all $\sigma^k \in S^k$ such that $\partial\sigma^k = \emptyset$. A \textbf{closed functional} returns zero on all contractible closed surfaces.
\end{defn}
\begin{remark}
Names are chosen to agree with exact/closed forms... Should we find better names?
\end{remark}
\begin{prop}
Let $f \in F_k$ be an exact $k$-functional. Then there exists some $(k-1)$-functional $g \in F_{k-1}$ such that $f = \partial g$. We say $g$ is the \textbf{potential} of $f$.
\end{prop}
\begin{remark}
The aim here is to prove the theorem on finite surfaces, without using standard differentiable calculus.
As a model, we should use the standard proof used in physics for irrotational fields. Suppose we have an exact 1-functional, that is it gives zero for all closed lines. Then, one can show that two lines that share the same boundary must have the same value. Then pick a point and assign zero to that point. To any other point, assign the value given by the functional over a line that starts at the zero point and ends that the new point. The potential is given by those assignments.
The way to generalize is to realize that a $k$-surface is half a boundary of a $k+1$-surface. For example, a point is half a boundary of a line, which consists of two points. The boundary of a surface is a closed line, which can be understood as two lines that share the same boundary, but opposite orientation. Like the line integral can be understood as ``going from'' one point (i.e. half boundary) to the other, the surface integral can be understood as ``going from'' one line (i.e. half boundary) to the other.
For example, suppose we have an exact 2-functional, that is it gives zero for all closed surfaces. Then two surfaces that share the same boundary have the same value. Pick a reference point. Pick a family of lines such that they all start from the reference point, all end at different point (covering the whole space) and never form two paths to the same point. For example, in local coordinates, change one coordinate at a time (i.e. first increase the x, then increase the y, then the z, ...). Now pick a scalar function (a (2 - 2)-form) and assign to each line the difference at the boundary (this arbitrary choice is the equivalent of choosing the constant function in the previous case, and effectively maps to the choice of gauge). Given any other line, we can use the family to find two lines to form a closed loop. A closed loop identifies a surface, which can now be given a value based on the 2-functional.
This should be generalizable with the following sketch. Take a set of surfaces $R \subset S^{k-1}$, called references, that:
\begin{enumerate}
\item includes the empty surface $\emptyset$
\item the union of a family of surfaces in $R$ is in $R$
\item any subsurface of a surface in $R$ is in $R$
\item no two surfaces share the same boundary.
\end{enumerate}
(Note: some care needs to be done with the definition for $k=1$) Therefore, for any $\sigma^{k-1} \in S^{k-1}$ we find a unique $R(\sigma^{k-1}) \in R$ such that $\partial R(\sigma^{k-1}) = \partial \sigma^{k-1}$. That is, for any surface we find a reference surface with the same boundary, and together $\sigma^{k-1} \cup R(\sigma^{k-1})$ they form a closed surface. Since $f$ is exact, $f(\sigma^k)$ depends only on the boundary of $\sigma^k$: two surfaces with equal boundary can be joined together to form a surface with no boundary, for which $f$ is zero. Therefore we can define $\hat{f}_{k-1} : S^{k-1} \to \mathbb{R}$ such that $\hat{f}_{k-1}(\sigma^{k-1}) = f_k(\hat{\sigma}^{k})$ where $\hat{\sigma}^{k}$ is any surface such that $\partial \hat{\sigma}^{k} = \sigma^{k-1}$. Now take an exact functional $v \in F_{k-1}$. Define $g \in F_{k-1}$ such that $g(\sigma^{k-1}) = \hat{f}_{k-1}(\sigma^{k-1} \cup R(\sigma^{k-1})) + v(R(\sigma^{k-1}))$. We have $\partial g(\sigma^{k}) = g(\partial \sigma^{k}) = \hat{f}_{k-1}(\partial \sigma^{k} \cup R(\partial \sigma^{k})) + v(R(\partial \sigma^{k}))$. Since $\partial \partial \sigma^k = \emptyset$, $R(\partial \sigma^{k}) = \emptyset$ because that is the only surface in $R$ with an empty boundary. Therefore $\partial g(\sigma^k) = \hat{f}_{k-1}(\partial \sigma^k \cup \emptyset) + v(\emptyset) = \hat{f}_{k-1}(\partial \sigma^k) = f_k(\sigma^k)$. Which means $\partial g = f$.
\end{remark}
\iffalse
\section{Differential forms}
This section needs to show that vectors and differential forms are infinitesimal counterparts of surfaces and functional.
\begin{defn}
TODO: Define a \textbf{$k$-vector} $v^k \in V^k$ as an infinitesimal parallelepiped. We note $V^k$ as the set of all vectors of rank $k$ and $V = \bigcup_{k=0}^n V^k$ as the set of all $k$-vectors.
\end{defn}
\begin{defn}
The \textbf{wedge product} $\wedge : V^k\times V^j \to V^{k+j}$ returns the $(k+j)$-vector that represents the parallelepiped formed by the sides represented by the given $k$-vector and $j$-vector.
\end{defn}
\begin{remark}
Notation for a generic vector. Infinitesimal displacement $dP$ is a vector and can be expressed as: $dP = dx \frac{\partial P}{\partial x^i}$. We can set $e_i = \frac{\partial P}{\partial x^i}$ so $dP = dx^i e_i$.
Every infinitesimal $k$-surface $d\sigma^k$ can be expressed, in terms of the wedge product, as $d\sigma^k= dx^{i_1}dx^{i_2}...dx^{i_k}\frac{\partial P}{\partial x^1} \wedge \frac{\partial P}{\partial x^2} \wedge ... \wedge \frac{\partial P}{\partial x^k} = dx^{i_1}dx^{i_2}...dx^{i_k} e_1 \wedge e_2 \wedge ... \wedge e_k$.
Suppose we have a $k$-surface in terms of $k$ coordinates $s^j$. We will have a differentiable function $x^i = x^i(s^j)$ that maps the parametrization of the $k$-surface into the manifold. At each point $P$, we can write $dx^i = \frac{\partial x^i}{\partial s^j} ds^j$. Therefore we have $d\sigma^k = dx^{i_1}dx^{i_2}...dx^{i_k} e_1 \wedge e_2 \wedge ... \wedge e_k$.
For example:
\begin{align*}
x^i &= \{x,y,z\} \\
s^j &= \{\varphi, \theta\} \\
x &= \sin \varphi \cos \theta \\
y &= \sin \varphi \sin \theta \\
z &= \cos \varphi \\
d\sigma &= d\varphi d\theta (e_\varphi \wedge e_\theta) \\
&=d\varphi d\theta \left(\frac{\partial x}{\partial \varphi} e_x + \frac{\partial y}{\partial \varphi} e_y + \frac{\partial z}{\partial \varphi} e_z\right) \wedge \left(\frac{\partial x}{\partial \theta} e_x + \frac{\partial y}{\partial \theta} e_y + \frac{\partial z}{\partial \theta} e_z\right) \\
&=d\varphi d\theta (
\frac{\partial x}{\partial \varphi} e_x \wedge \frac{\partial x}{\partial \theta} e_x +
\frac{\partial x}{\partial \varphi} e_x \wedge \frac{\partial y}{\partial \theta} e_y +
\frac{\partial x}{\partial \varphi} e_x \wedge \frac{\partial z}{\partial \theta} e_z + \\
&\frac{\partial y}{\partial \varphi} e_y \wedge \frac{\partial x}{\partial \theta} e_x +
\frac{\partial y}{\partial \varphi} e_y \wedge \frac{\partial y}{\partial \theta} e_y +
\frac{\partial y}{\partial \varphi} e_y \wedge \frac{\partial z}{\partial \theta} e_z + \\
&\frac{\partial z}{\partial \varphi} e_z \wedge \frac{\partial x}{\partial \theta} e_x +
\frac{\partial z}{\partial \varphi} e_z \wedge \frac{\partial y}{\partial \theta} e_y +
\frac{\partial z}{\partial \varphi} e_z \wedge \frac{\partial z}{\partial \theta} e_z ) \\
&=d\varphi d\theta ((
\frac{\partial x}{\partial \varphi} \frac{\partial y}{\partial \theta} - \frac{\partial y}{\partial \varphi}\frac{\partial x}{\partial \theta}) e_x \wedge e_y + \\
& (\frac{\partial y}{\partial \varphi} \frac{\partial z}{\partial \theta} - \frac{\partial z}{\partial \varphi} \frac{\partial y}{\partial \theta}) e_y \wedge e_z + \\
&(\frac{\partial z}{\partial \varphi} \frac{\partial x}{\partial \theta} - \frac{\partial x}{\partial \varphi} \frac{\partial z}{\partial \theta}) e_z \wedge e_x ) \\
&=d\varphi d\theta ((
\cos \varphi \cos \theta \sin \varphi \cos \theta - \cos \varphi \sin \theta \sin \varphi (-\sin \theta)) e_x \wedge e_y + \\
& (\cos \varphi \sin \theta 0 - (- \sin \varphi) \sin \varphi \cos \theta) e_y \wedge e_z + \\
& (- \sin \varphi \sin \varphi (-\sin \theta) - \cos \varphi \cos \theta 0) e_z \wedge e_x ) \\
&=d\varphi d\theta (\cos \varphi \sin \varphi e_x \wedge e_y +
\sin^2 \varphi \cos \theta e_y \wedge e_z +
\sin^2 \varphi \sin \theta e_z \wedge e_x )
\end{align*}
\end{remark}
\begin{defn}
A \textbf{$k$-form} $\omega_k : V^k \to \mathbb{R}$ is a linear function of a vector. We note $\Omega_k$ as the set of all $k$-forms of dimension $k$ and $\Omega = \bigcup_{k=0}^n\Omega_k$ as the set of all forms.
\end{defn}
\begin{prop}
TODO Show that every $k$-functional has a corresponding $k$-form, such that $f_k(\sigma^k) = \int_{\sigma^k} \omega_k(d\sigma^k)$. In words, a linear functional applied over a $k$-surface is the same as an integral of a $k$-form over the infinitesimal parallelepipeds of that $k$-surface.
\end{prop}
\fi
\end{document}