-
Notifications
You must be signed in to change notification settings - Fork 61
/
kernel.html
657 lines (578 loc) · 58.7 KB
/
kernel.html
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
<!DOCTYPE html><html><head>
<title>kernel</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<script type="text/x-mathjax-config">
MathJax.Hub.Config({"extensions":["tex2jax.js"],"jax":["input/TeX","output/HTML-CSS"],"messageStyle":"none","tex2jax":{"processEnvironments":false,"processEscapes":true,"inlineMath":[["$","$"],["\\(","\\)"]],"displayMath":[["$$","$$"],["\\[","\\]"]]},"TeX":{"extensions":["AMSmath.js","AMSsymbols.js","noErrors.js","noUndefined.js"]},"HTML-CSS":{"availableFonts":["TeX"]}});
</script>
<script type="text/javascript" async src="file:////Users/samuel/.vscode/extensions/shd101wyy.markdown-preview-enhanced-0.5.0/node_modules/@shd101wyy/mume/dependencies/mathjax/MathJax.js" charset="UTF-8"></script>
<style>
/**
* prism.js Github theme based on GitHub's theme.
* @author Sam Clarke
*/
code[class*="language-"],
pre[class*="language-"] {
color: #333;
background: none;
font-family: Consolas, "Liberation Mono", Menlo, Courier, monospace;
text-align: left;
white-space: pre;
word-spacing: normal;
word-break: normal;
word-wrap: normal;
line-height: 1.4;
-moz-tab-size: 8;
-o-tab-size: 8;
tab-size: 8;
-webkit-hyphens: none;
-moz-hyphens: none;
-ms-hyphens: none;
hyphens: none;
}
/* Code blocks */
pre[class*="language-"] {
padding: .8em;
overflow: auto;
/* border: 1px solid #ddd; */
border-radius: 3px;
/* background: #fff; */
background: #f5f5f5;
}
/* Inline code */
:not(pre) > code[class*="language-"] {
padding: .1em;
border-radius: .3em;
white-space: normal;
background: #f5f5f5;
}
.token.comment,
.token.blockquote {
color: #969896;
}
.token.cdata {
color: #183691;
}
.token.doctype,
.token.punctuation,
.token.variable,
.token.macro.property {
color: #333;
}
.token.operator,
.token.important,
.token.keyword,
.token.rule,
.token.builtin {
color: #a71d5d;
}
.token.string,
.token.url,
.token.regex,
.token.attr-value {
color: #183691;
}
.token.property,
.token.number,
.token.boolean,
.token.entity,
.token.atrule,
.token.constant,
.token.symbol,
.token.command,
.token.code {
color: #0086b3;
}
.token.tag,
.token.selector,
.token.prolog {
color: #63a35c;
}
.token.function,
.token.namespace,
.token.pseudo-element,
.token.class,
.token.class-name,
.token.pseudo-class,
.token.id,
.token.url-reference .token.variable,
.token.attr-name {
color: #795da3;
}
.token.entity {
cursor: help;
}
.token.title,
.token.title .token.punctuation {
font-weight: bold;
color: #1d3e81;
}
.token.list {
color: #ed6a43;
}
.token.inserted {
background-color: #eaffea;
color: #55a532;
}
.token.deleted {
background-color: #ffecec;
color: #bd2c00;
}
.token.bold {
font-weight: bold;
}
.token.italic {
font-style: italic;
}
/* JSON */
.language-json .token.property {
color: #183691;
}
.language-markup .token.tag .token.punctuation {
color: #333;
}
/* CSS */
code.language-css,
.language-css .token.function {
color: #0086b3;
}
/* YAML */
.language-yaml .token.atrule {
color: #63a35c;
}
code.language-yaml {
color: #183691;
}
/* Ruby */
.language-ruby .token.function {
color: #333;
}
/* Markdown */
.language-markdown .token.url {
color: #795da3;
}
/* Makefile */
.language-makefile .token.symbol {
color: #795da3;
}
.language-makefile .token.variable {
color: #183691;
}
.language-makefile .token.builtin {
color: #0086b3;
}
/* Bash */
.language-bash .token.keyword {
color: #0086b3;
}
/* highlight */
pre[data-line] {
position: relative;
padding: 1em 0 1em 3em;
}
pre[data-line] .line-highlight-wrapper {
position: absolute;
top: 0;
left: 0;
background-color: transparent;
display: block;
width: 100%;
}
pre[data-line] .line-highlight {
position: absolute;
left: 0;
right: 0;
padding: inherit 0;
margin-top: 1em;
background: hsla(24, 20%, 50%,.08);
background: linear-gradient(to right, hsla(24, 20%, 50%,.1) 70%, hsla(24, 20%, 50%,0));
pointer-events: none;
line-height: inherit;
white-space: pre;
}
pre[data-line] .line-highlight:before,
pre[data-line] .line-highlight[data-end]:after {
content: attr(data-start);
position: absolute;
top: .4em;
left: .6em;
min-width: 1em;
padding: 0 .5em;
background-color: hsla(24, 20%, 50%,.4);
color: hsl(24, 20%, 95%);
font: bold 65%/1.5 sans-serif;
text-align: center;
vertical-align: .3em;
border-radius: 999px;
text-shadow: none;
box-shadow: 0 1px white;
}
pre[data-line] .line-highlight[data-end]:after {
content: attr(data-end);
top: auto;
bottom: .4em;
}html body{font-family:"Helvetica Neue",Helvetica,"Segoe UI",Arial,freesans,sans-serif;font-size:16px;line-height:1.6;color:#333;background-color:#fff;overflow:initial;box-sizing:border-box;word-wrap:break-word}html body>:first-child{margin-top:0}html body h1,html body h2,html body h3,html body h4,html body h5,html body h6{line-height:1.2;margin-top:1em;margin-bottom:16px;color:#000}html body h1{font-size:2.25em;font-weight:300;padding-bottom:.3em}html body h2{font-size:1.75em;font-weight:400;padding-bottom:.3em}html body h3{font-size:1.5em;font-weight:500}html body h4{font-size:1.25em;font-weight:600}html body h5{font-size:1.1em;font-weight:600}html body h6{font-size:1em;font-weight:600}html body h1,html body h2,html body h3,html body h4,html body h5{font-weight:600}html body h5{font-size:1em}html body h6{color:#5c5c5c}html body strong{color:#000}html body del{color:#5c5c5c}html body a:not([href]){color:inherit;text-decoration:none}html body a{color:#08c;text-decoration:none}html body a:hover{color:#00a3f5;text-decoration:none}html body img{max-width:100%}html body>p{margin-top:0;margin-bottom:16px;word-wrap:break-word}html body>ul,html body>ol{margin-bottom:16px}html body ul,html body ol{padding-left:2em}html body ul.no-list,html body ol.no-list{padding:0;list-style-type:none}html body ul ul,html body ul ol,html body ol ol,html body ol ul{margin-top:0;margin-bottom:0}html body li{margin-bottom:0}html body li.task-list-item{list-style:none}html body li>p{margin-top:0;margin-bottom:0}html body .task-list-item-checkbox{margin:0 .2em .25em -1.8em;vertical-align:middle}html body .task-list-item-checkbox:hover{cursor:pointer}html body blockquote{margin:16px 0;font-size:inherit;padding:0 15px;color:#5c5c5c;border-left:4px solid #d6d6d6}html body blockquote>:first-child{margin-top:0}html body blockquote>:last-child{margin-bottom:0}html body hr{height:4px;margin:32px 0;background-color:#d6d6d6;border:0 none}html body table{margin:10px 0 15px 
0;border-collapse:collapse;border-spacing:0;display:block;width:100%;overflow:auto;word-break:normal;word-break:keep-all}html body table th{font-weight:bold;color:#000}html body table td,html body table th{border:1px solid #d6d6d6;padding:6px 13px}html body dl{padding:0}html body dl dt{padding:0;margin-top:16px;font-size:1em;font-style:italic;font-weight:bold}html body dl dd{padding:0 16px;margin-bottom:16px}html body code{font-family:Menlo,Monaco,Consolas,'Courier New',monospace;font-size:.85em !important;color:#000;background-color:#f0f0f0;border-radius:3px;padding:.2em 0}html body code::before,html body code::after{letter-spacing:-0.2em;content:"\00a0"}html body pre>code{padding:0;margin:0;font-size:.85em !important;word-break:normal;white-space:pre;background:transparent;border:0}html body .highlight{margin-bottom:16px}html body .highlight pre,html body pre{padding:1em;overflow:auto;font-size:.85em !important;line-height:1.45;border:#d6d6d6;border-radius:3px}html body .highlight pre{margin-bottom:0;word-break:normal}html body pre code,html body pre tt{display:inline;max-width:initial;padding:0;margin:0;overflow:initial;line-height:inherit;word-wrap:normal;background-color:transparent;border:0}html body pre code:before,html body pre tt:before,html body pre code:after,html body pre tt:after{content:normal}html body p,html body blockquote,html body ul,html body ol,html body dl,html body pre{margin-top:0;margin-bottom:16px}html body kbd{color:#000;border:1px solid #d6d6d6;border-bottom:2px solid #c7c7c7;padding:2px 4px;background-color:#f0f0f0;border-radius:3px}@media print{html body{background-color:#fff}html body h1,html body h2,html body h3,html body h4,html body h5,html body h6{color:#000;page-break-after:avoid}html body blockquote{color:#5c5c5c}html body pre{page-break-inside:avoid}html body table{display:table}html body img{display:block;max-width:100%;max-height:100%}html body pre,html body 
code{word-wrap:break-word;white-space:pre}}.markdown-preview{width:100%;height:100%;box-sizing:border-box}.markdown-preview .pagebreak,.markdown-preview .newpage{page-break-before:always}.markdown-preview pre.line-numbers{position:relative;padding-left:3.8em;counter-reset:linenumber}.markdown-preview pre.line-numbers>code{position:relative}.markdown-preview pre.line-numbers .line-numbers-rows{position:absolute;pointer-events:none;top:1em;font-size:100%;left:0;width:3em;letter-spacing:-1px;border-right:1px solid #999;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.markdown-preview pre.line-numbers .line-numbers-rows>span{pointer-events:none;display:block;counter-increment:linenumber}.markdown-preview pre.line-numbers .line-numbers-rows>span:before{content:counter(linenumber);color:#999;display:block;padding-right:.8em;text-align:right}.markdown-preview .mathjax-exps .MathJax_Display{text-align:center !important}.markdown-preview:not([for="preview"]) .code-chunk .btn-group{display:none}.markdown-preview:not([for="preview"]) .code-chunk .status{display:none}.markdown-preview:not([for="preview"]) .code-chunk .output-div{margin-bottom:16px}.scrollbar-style::-webkit-scrollbar{width:8px}.scrollbar-style::-webkit-scrollbar-track{border-radius:10px;background-color:transparent}.scrollbar-style::-webkit-scrollbar-thumb{border-radius:5px;background-color:rgba(150,150,150,0.66);border:4px solid rgba(150,150,150,0.66);background-clip:content-box}html body[for="html-export"]:not([data-presentation-mode]){position:relative;width:100%;height:100%;top:0;left:0;margin:0;padding:0;overflow:auto}html body[for="html-export"]:not([data-presentation-mode]) .markdown-preview{position:relative;top:0}@media screen and (min-width:914px){html body[for="html-export"]:not([data-presentation-mode]) .markdown-preview{padding:2em calc(50% - 457px + 2em)}}@media screen and (max-width:914px){html body[for="html-export"]:not([data-presentation-mode]) 
.markdown-preview{padding:2em}}@media screen and (max-width:450px){html body[for="html-export"]:not([data-presentation-mode]) .markdown-preview{font-size:14px !important;padding:1em}}@media print{html body[for="html-export"]:not([data-presentation-mode]) #sidebar-toc-btn{display:none}}html body[for="html-export"]:not([data-presentation-mode]) #sidebar-toc-btn{position:fixed;bottom:8px;left:8px;font-size:28px;cursor:pointer;color:inherit;z-index:99;width:32px;text-align:center;opacity:.4}html body[for="html-export"]:not([data-presentation-mode])[html-show-sidebar-toc] #sidebar-toc-btn{opacity:1}html body[for="html-export"]:not([data-presentation-mode])[html-show-sidebar-toc] .md-sidebar-toc{position:fixed;top:0;left:0;width:300px;height:100%;padding:32px 0 48px 0;font-size:14px;box-shadow:0 0 4px rgba(150,150,150,0.33);box-sizing:border-box;overflow:auto;background-color:inherit}html body[for="html-export"]:not([data-presentation-mode])[html-show-sidebar-toc] .md-sidebar-toc::-webkit-scrollbar{width:8px}html body[for="html-export"]:not([data-presentation-mode])[html-show-sidebar-toc] .md-sidebar-toc::-webkit-scrollbar-track{border-radius:10px;background-color:transparent}html body[for="html-export"]:not([data-presentation-mode])[html-show-sidebar-toc] .md-sidebar-toc::-webkit-scrollbar-thumb{border-radius:5px;background-color:rgba(150,150,150,0.66);border:4px solid rgba(150,150,150,0.66);background-clip:content-box}html body[for="html-export"]:not([data-presentation-mode])[html-show-sidebar-toc] .md-sidebar-toc a{text-decoration:none}html body[for="html-export"]:not([data-presentation-mode])[html-show-sidebar-toc] .md-sidebar-toc ul{padding:0 1.6em;margin-top:.8em}html body[for="html-export"]:not([data-presentation-mode])[html-show-sidebar-toc] .md-sidebar-toc li{margin-bottom:.8em}html body[for="html-export"]:not([data-presentation-mode])[html-show-sidebar-toc] .md-sidebar-toc ul{list-style-type:none}html 
body[for="html-export"]:not([data-presentation-mode])[html-show-sidebar-toc] .markdown-preview{left:300px;width:calc(100% - 300px);padding:2em calc(50% - 457px - 150px);margin:0;box-sizing:border-box}@media screen and (max-width:1274px){html body[for="html-export"]:not([data-presentation-mode])[html-show-sidebar-toc] .markdown-preview{padding:2em}}@media screen and (max-width:450px){html body[for="html-export"]:not([data-presentation-mode])[html-show-sidebar-toc] .markdown-preview{width:100%}}html body[for="html-export"]:not([data-presentation-mode]):not([html-show-sidebar-toc]) .markdown-preview{left:50%;transform:translateX(-50%)}html body[for="html-export"]:not([data-presentation-mode]):not([html-show-sidebar-toc]) .md-sidebar-toc{display:none}
/* Please visit the URL below for more information: */
/* https://shd101wyy.github.io/markdown-preview-enhanced/#/customize-css */
.markdown-preview.markdown-preview h1,
.markdown-preview.markdown-preview h2,
.markdown-preview.markdown-preview h3,
.markdown-preview.markdown-preview h4,
.markdown-preview.markdown-preview h5,
.markdown-preview.markdown-preview h6 {
font-weight: bolder;
text-decoration-line: underline;
}
</style>
</head>
<body for="html-export">
<div class="mume markdown-preview ">
<div><h1 class="mume-header" id="kernels">Kernels</h1>
<h2 class="mume-header" id="definition">Definition</h2>
<p>When performing an arithmetic computation on a given image, one approach is to apply said computation in a neighborhood-by-neighborhood manner. This approach is very broadly termed a <strong>convolution</strong>. In other words, convolution is an operation between every part of an image ("pixel neighborhood") and an operator ("kernel")<sup class="footnote-ref"><a href="#fn1" id="fnref1">[1]</a></sup><sup class="footnote-ref"><a href="#fn2" id="fnref2">[2]</a></sup>.</p>
<p>As the computation slides over each pixel neighborhood, we perform some arithmetic using the kernel, with the kernel typically being represented as a matrix or a fixed size array.</p>
<p>This kernel describes how the pixels in that neighborhood are combined or transformed to yield a corresponding output.</p>
<ul>
<li class="task-list-item">
<p><input type="checkbox" class="task-list-item-checkbox"> <a href="https://www.youtube.com/watch?v=WMmHcrX4Obg">Watch Kernel Convolution Explained Visually</a></p>
<iframe width="560" height="315" src="https://www.youtube.com/embed/WMmHcrX4Obg" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
</li>
</ul>
<h3 class="mume-header" id="mathematical-definitions">Mathematical Definitions</h3>
<p>You will notice from the video that the output image now has a <strong>shape that is smaller</strong> than the original input. Mathematically, the shape of this output would be:</p>
<p></p><div class="mathjax-exps">$$(\frac{X_m-M_i}{s_x})+1, (\frac{X_n-M_j}{s_y})+1$$</div><p></p>
<p>Where the input matrix has a size of <span class="mathjax-exps">$(X_m, X_n)$</span>, the kernel <span class="mathjax-exps">$M$</span> is of size <span class="mathjax-exps">$(M_i, M_j)$</span>, <span class="mathjax-exps">$s_x$</span> represents the stride over rows while <span class="mathjax-exps">$s_y$</span> represents the stride over columns.</p>
<p>In the linked video, we are sliding the kernel in both the x- and y-directions by 1 pixel at a time after each computation, giving a value of 1 for <span class="mathjax-exps">$s_x$</span> and <span class="mathjax-exps">$s_y$</span>. The input matrix in our video is of size 5x5, and our kernel is of size 3x3, giving us an output size of:</p>
<p></p><div class="mathjax-exps">$$(\frac{5-3}{1}+1, \frac{5-3}{1}+1)$$</div><p></p>
<p>Expressed mathematically, the full procedure as implemented in <code>opencv</code> looks like this for a convolution:</p>
<p><span class="mathjax-exps">$H(x, y) = \sum^{M_i-1}_{i=0}\sum^{M_j-1}_{j=0} I(x+i-a_i, y+j-a_j)K(i,j)$</span></p>
<p>We'll see the step-by-step given a kernel represented by matrix M:</p>
<p></p><div class="mathjax-exps">$$M = \begin{bmatrix} 1 & 2 & 0 \\ -1 & 3 & 0 \\ 0 & -1 & 0 \end{bmatrix}$$</div><p></p>
<ol>
<li>
<p>Place the kernel anchor (in this case, <span class="mathjax-exps">$3$</span>) on top of a determined pixel, with the rest of the kernel overlaying the corresponding local pixels in the image</p>
<ul>
<li>Typically the kernel anchor is the <em>center</em> of the kernel</li>
<li>Typically the "determined pixel" at the first step is the upper-left region of the image</li>
</ul>
</li>
<li>
<p>Multiply the kernel coefficients by the corresponding image pixel values and sum the result</p>
</li>
<li>
<p>Replace the value at the location of the <em>anchor</em> in the input image with the result</p>
</li>
<li>
<p>Repeat the process for all pixels by sliding the kernel across the entire image, as specified by the stride</p>
</li>
</ol>
<h4 class="mume-header" id="a-note-on-padding">A Note on Padding</h4>
<p>Keen readers may observe from executing <code>meanblur_02.py</code> that the original dimension of our image is preserved <em>after</em> the convolution. This may seem unexpected given what we know about the formula to derive the output dimension.<br>
As it turns out, to preserve the dimension between the input and output images, a common technique known as "padding" is applied. From the documentation itself,</p>
<blockquote>
<p>For example, if you want to smooth an image using a Gaussian 3 * 3 filter, then, when processing the left-most pixels in each row, you need pixels to the left of them, that is, outside of the image. You can let these pixels be the same as the left-most image pixels (“replicated border” extrapolation method), or assume that all the non-existing pixels are zeros (“constant border” extrapolation method), and so on.</p>
</blockquote>
<p>The various border extrapolation techniques available in <code>opencv</code> are as below (image boundaries are denoted with '|'):</p>
<ul>
<li>BORDER_REPLICATE:
<ul>
<li><code>aaaaaa|abcdefgh|hhhhhhh</code></li>
</ul>
</li>
<li>BORDER_REFLECT:
<ul>
<li><code>fedcba|abcdefgh|hgfedcb</code></li>
</ul>
</li>
<li>BORDER_REFLECT_101:
<ul>
<li><code>gfedcb|abcdefgh|gfedcba</code></li>
</ul>
</li>
<li>BORDER_WRAP:
<ul>
<li><code>cdefgh|abcdefgh|abcdefg</code></li>
</ul>
</li>
<li>BORDER_CONSTANT:
<ul>
<li><code>iiiiii|abcdefgh|iiiiiii</code> with some specified 'i'</li>
</ul>
</li>
</ul>
<p>It is useful to remember that OpenCV only supports convolving an image where the dimension of its output matches that of the input, so in almost all cases we need a way to extrapolate an extra layer of pixels around the borders. To specify an extrapolation method, supply the filtering method with an extra argument:</p>
<ul>
<li><code>cv2.GaussianBlur(..., borderType=BORDER_CONSTANT)</code></li>
</ul>
<p>Given what we've just learned, we can rewrite our formula to determine the output dimensions more generally and this time incorporating the padding technique:</p>
<p></p><div class="mathjax-exps">$$(\frac{X_m - M_i + 2P_i}{s_x})+1, (\frac{X_n-M_j + 2P_j}{s_y})+1$$</div><p></p>
<h5 class="mume-header" id="dive-deeper">Dive Deeper</h5>
<p>Before moving on to the next section, try and think through the following problem:</p>
<p>In the case of a 333x333 input image, with a stride of 1 using a kernel of size 5x5, what is the amount of zero-padding you should add to the borders of your image such that the output image is also 333x333?</p>
<ul>
<li class="task-list-item"><input type="checkbox" class="task-list-item-checkbox"> Done, I've understood the convolution operation!</li>
</ul>
<h2 class="mume-header" id="smoothing-and-blurring">Smoothing and Blurring</h2>
<p>To fully appreciate the idea of kernel convolutions, we'll see some real examples. We'll use the <code>cv2.filter2D</code> to convolve over our image using the following kernel:</p>
<p></p><div class="mathjax-exps">$$K = \frac{1}{5\cdot5} \begin{bmatrix} 1 & 1 & 1 & 1 & 1 \\ 1 & 1 & 1 & 1 & 1 \\ 1 & 1 & 1 & 1 & 1 \\ 1 & 1 & 1 & 1 & 1 \\ 1 & 1 & 1 & 1 & 1 \end{bmatrix}$$</div><p></p>
<p>The kernel we specified above is equivalent to a <em>normalized box filter</em> of size 5. Having watched the video earlier, you may intuit that the outcome of such a convolution is that each pixel in the input image is replaced by the average of the 5x5 pixels around it. You are in fact correct. If you are skeptical and would rather see proof of it, we'll see proof of this in the <a href="#code-illustrations-mean-filtering">Code Illustrations: Mean Filtering</a> section of this coursebook.</p>
<p>Mathematically, by dividing our matrix by 25 (normalizing) we apply a control that stops our pixel values from being artificially increased since each pixel is now the weighted sum of its neighborhood.</p>
<blockquote>
<h4>A Note on Terminology</h4>
<h5>Kernels or Filters?</h5>
<p>When all we've been talking about is kernels, why is it that we're using the "filter" terminology in <code>opencv</code> code instead? That depends on the context. In the case of a convolutional neural network, <em>kernels</em> and <em>filters</em> are used interchangeably: they both refer to the same thing.<br>
Some computer vision researchers have proposed to use a stricter definition, preferring to use the term "kernel" for a 2D array of weights, like our matrix above, and the term "filter" for the 3D structure of multiple kernels stacked together<sup class="footnote-ref"><a href="#fn3" id="fnref3">[3]</a></sup>, a concept we'll explore further in the Convolutional Neural Network part of this course.</p>
<h5>Correlations vs Convolutions</h5>
<p>Imaging specialists may point to the fact that <code>opencv</code> does not mirror / flip the kernel around the anchor point and hence doesn't qualify as a convolution under strict definitions of digital imaging theory. For a pure implementation of a "convolution", you should use <code>scipy.ndimage.convolve(src, kernel)</code> instead, or use <code>cv2.filter2D</code> in conjunction with a <code>flip</code> on the kernel<sup class="footnote-ref"><a href="#fn4" id="fnref4">[4]</a></sup>. This is in large part owed to the difference in scientific parlance adopted by the various scientific communities, a phenomenon more common than you'd expect. As an additional example, deep learning scientists using convolutional neural networks (CNN) generally refer to a non-flipped kernel when performing convolution.</p>
</blockquote>
<h4 class="mume-header" id="code-illustrations-mean-filtering">Code Illustrations: Mean Filtering</h4>
<ol>
<li><code>meanblur_01.py</code> demonstrates the construction of a 5x5 mean average filter using <code>np.ones((5,5))/25</code>. Because every coefficient is basically the same, this merely replaces the value of each pixel in our input image with the average of the values in its 5x5 neighborhood.</li>
</ol>
<pre data-role="codeBlock" data-info="py" class="language-python">img <span class="token operator">=</span> cv2<span class="token punctuation">.</span>imread<span class="token punctuation">(</span><span class="token string">"assets/canal.png"</span><span class="token punctuation">)</span>
mean_blur <span class="token operator">=</span> np<span class="token punctuation">.</span>ones<span class="token punctuation">(</span><span class="token punctuation">(</span><span class="token number">5</span><span class="token punctuation">,</span> <span class="token number">5</span><span class="token punctuation">)</span><span class="token punctuation">,</span> dtype<span class="token operator">=</span><span class="token string">"float32"</span><span class="token punctuation">)</span> <span class="token operator">*</span> <span class="token punctuation">(</span><span class="token number">1.0</span> <span class="token operator">/</span> <span class="token punctuation">(</span><span class="token number">5</span> <span class="token operator">**</span> <span class="token number">2</span><span class="token punctuation">)</span><span class="token punctuation">)</span>
smoothed_col <span class="token operator">=</span> cv2<span class="token punctuation">.</span>filter2D<span class="token punctuation">(</span>img<span class="token punctuation">,</span> <span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">,</span> mean_blur<span class="token punctuation">)</span>
</pre><p>Alternatively, we can be explicit in our creation of the 5x5 kernel using <code>numpy</code>'s array:</p>
<pre data-role="codeBlock" data-info="py" class="language-python">mean_blur <span class="token operator">=</span> np<span class="token punctuation">.</span>array<span class="token punctuation">(</span>
<span class="token punctuation">[</span><span class="token punctuation">[</span><span class="token number">0.04</span><span class="token punctuation">,</span> <span class="token number">0.04</span><span class="token punctuation">,</span> <span class="token number">0.04</span><span class="token punctuation">,</span> <span class="token number">0.04</span><span class="token punctuation">,</span> <span class="token number">0.04</span><span class="token punctuation">]</span><span class="token punctuation">,</span>
<span class="token punctuation">[</span><span class="token number">0.04</span><span class="token punctuation">,</span> <span class="token number">0.04</span><span class="token punctuation">,</span> <span class="token number">0.04</span><span class="token punctuation">,</span> <span class="token number">0.04</span><span class="token punctuation">,</span> <span class="token number">0.04</span><span class="token punctuation">]</span><span class="token punctuation">,</span>
<span class="token punctuation">[</span><span class="token number">0.04</span><span class="token punctuation">,</span> <span class="token number">0.04</span><span class="token punctuation">,</span> <span class="token number">0.04</span><span class="token punctuation">,</span> <span class="token number">0.04</span><span class="token punctuation">,</span> <span class="token number">0.04</span><span class="token punctuation">]</span><span class="token punctuation">,</span>
<span class="token punctuation">[</span><span class="token number">0.04</span><span class="token punctuation">,</span> <span class="token number">0.04</span><span class="token punctuation">,</span> <span class="token number">0.04</span><span class="token punctuation">,</span> <span class="token number">0.04</span><span class="token punctuation">,</span> <span class="token number">0.04</span><span class="token punctuation">]</span><span class="token punctuation">,</span>
<span class="token punctuation">[</span><span class="token number">0.04</span><span class="token punctuation">,</span> <span class="token number">0.04</span><span class="token punctuation">,</span> <span class="token number">0.04</span><span class="token punctuation">,</span> <span class="token number">0.04</span><span class="token punctuation">,</span> <span class="token number">0.04</span><span class="token punctuation">]</span><span class="token punctuation">]</span><span class="token punctuation">)</span>
</pre><ol start="2">
<li>
<p>To be fully convinced that the mean filtering operation is doing what we expect it to do, we can inspect the pixel values before and after the convolution, to verify that the math checks out by hand. We do this in <code>meanblur_02.py</code>.</p>
<pre data-role="codeBlock" data-info="py" class="language-python">img <span class="token operator">=</span> cv2<span class="token punctuation">.</span>imread<span class="token punctuation">(</span><span class="token string">"assets/canal.png"</span><span class="token punctuation">)</span>
gray <span class="token operator">=</span> cv2<span class="token punctuation">.</span>cvtColor<span class="token punctuation">(</span>img<span class="token punctuation">,</span> cv2<span class="token punctuation">.</span>COLOR_BGR2GRAY<span class="token punctuation">)</span>
<span class="token keyword">print</span><span class="token punctuation">(</span><span class="token string-interpolation"><span class="token string">f'Gray: </span><span class="token interpolation"><span class="token punctuation">{</span>gray<span class="token punctuation">[</span><span class="token punctuation">:</span><span class="token number">5</span><span class="token punctuation">,</span> <span class="token punctuation">:</span><span class="token format-spec">5]</span><span class="token punctuation">}</span></span><span class="token string">'</span></span><span class="token punctuation">)</span>
<span class="token comment"># [[ 31 27 21 17 21]</span>
<span class="token comment"># [ 77 85 86 87 90]</span>
<span class="token comment"># [205 205 215 227 222]</span>
<span class="token comment"># [224 230 222 243 249]</span>
<span class="token comment"># [138 210 206 218 242]]</span>
<span class="token keyword">for</span> i <span class="token keyword">in</span> <span class="token builtin">range</span><span class="token punctuation">(</span><span class="token number">3</span><span class="token punctuation">)</span><span class="token punctuation">:</span>
newval <span class="token operator">=</span> np<span class="token punctuation">.</span><span class="token builtin">round</span><span class="token punctuation">(</span>np<span class="token punctuation">.</span>mean<span class="token punctuation">(</span>gray<span class="token punctuation">[</span><span class="token punctuation">:</span><span class="token number">5</span><span class="token punctuation">,</span> i<span class="token punctuation">:</span>i<span class="token operator">+</span><span class="token number">5</span><span class="token punctuation">]</span><span class="token punctuation">)</span><span class="token punctuation">)</span>
<span class="token keyword">print</span><span class="token punctuation">(</span><span class="token string-interpolation"><span class="token string">f'Mean of 25x25 pixel #</span><span class="token interpolation"><span class="token punctuation">{</span>i<span class="token operator">+</span><span class="token number">1</span><span class="token punctuation">}</span></span><span class="token string">: </span><span class="token interpolation"><span class="token punctuation">{</span>np<span class="token punctuation">.</span><span class="token builtin">int</span><span class="token punctuation">(</span>newval<span class="token punctuation">)</span><span class="token punctuation">}</span></span><span class="token string">'</span></span><span class="token punctuation">)</span>
<span class="token comment"># output:</span>
<span class="token comment"># Mean of 25x25 pixel #1: 152</span>
<span class="token comment"># Mean of 25x25 pixel #2: 158</span>
<span class="token comment"># Mean of 25x25 pixel #3: 160</span>
</pre><p>The code above shows that the output of such a convolution operation beginning at the top-left region of the image would be 152. As we slide along the horizontal direction and re-compute the mean of the neighborhood, we get 158. As we slide our kernel along the horizontal direction for a second time and re-compute the mean of the neighborhood we obtain the value of 160.</p>
<p>If you prefer you can verify these values by hand, using the raw pixel values from <code>gray[:5, :5]</code> (5x5 top-left region of the image).</p>
<pre data-role="codeBlock" data-info="py" class="language-python">mean_blur <span class="token operator">=</span> np<span class="token punctuation">.</span>ones<span class="token punctuation">(</span>KERNEL_SIZE<span class="token punctuation">,</span> dtype<span class="token operator">=</span><span class="token string">"float32"</span><span class="token punctuation">)</span> <span class="token operator">*</span> <span class="token punctuation">(</span><span class="token number">1.0</span> <span class="token operator">/</span> <span class="token punctuation">(</span><span class="token number">5</span> <span class="token operator">**</span> <span class="token number">2</span><span class="token punctuation">)</span><span class="token punctuation">)</span>
smoothed_gray <span class="token operator">=</span> cv2<span class="token punctuation">.</span>filter2D<span class="token punctuation">(</span>gray<span class="token punctuation">,</span> <span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">,</span> mean_blur<span class="token punctuation">)</span>
<span class="token keyword">print</span><span class="token punctuation">(</span><span class="token string-interpolation"><span class="token string">f'Smoothed: </span><span class="token interpolation"><span class="token punctuation">{</span>smoothed_gray<span class="token punctuation">[</span><span class="token punctuation">:</span><span class="token number">5</span><span class="token punctuation">,</span> <span class="token punctuation">:</span><span class="token format-spec">5]</span><span class="token punctuation">}</span></span><span class="token string">'</span></span><span class="token punctuation">)</span>
<span class="token comment"># output:</span>
<span class="token comment"># [[122 123 125 127 128]</span>
<span class="token comment"># [126 127 128 131 132]</span>
<span class="token comment"># [148 149 152 158 160]</span>
<span class="token comment"># [177 179 184 196 202]</span>
<span class="token comment"># [197 199 204 222 229]]</span>
</pre><p>Notice that from the output of our mean-filter, the first anchor (center of the neighborhood) has transformed from 215 to 152, and the one to the right of it has transformed from 227 to 158, and so on. The math does work out and you can observe the blur effect directly by running <code>meanblur_02.py</code>.</p>
</li>
<li>
<p>As it turns out, <code>opencv</code> provides a set of convenience functions to apply filtering onto our images. All the three approaches below yield the same output, as can be verified from the output pixel values after executing <code>meanblur_03.py</code>:</p>
<pre data-role="codeBlock" data-info="py" class="language-python"><span class="token comment"># approach 1</span>
mean_blur <span class="token operator">=</span> np<span class="token punctuation">.</span>ones<span class="token punctuation">(</span>KERNEL_SIZE<span class="token punctuation">,</span> dtype<span class="token operator">=</span><span class="token string">"float32"</span><span class="token punctuation">)</span> <span class="token operator">*</span> <span class="token punctuation">(</span><span class="token number">1.0</span> <span class="token operator">/</span> <span class="token punctuation">(</span><span class="token number">5</span> <span class="token operator">**</span> <span class="token number">2</span><span class="token punctuation">)</span><span class="token punctuation">)</span>
smoothed_gray <span class="token operator">=</span> cv2<span class="token punctuation">.</span>filter2D<span class="token punctuation">(</span>gray<span class="token punctuation">,</span> <span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">,</span> mean_blur<span class="token punctuation">)</span>
<span class="token comment"># approach 2</span>
smoothed_gray <span class="token operator">=</span> cv2<span class="token punctuation">.</span>blur<span class="token punctuation">(</span>gray<span class="token punctuation">,</span> KERNEL_SIZE<span class="token punctuation">)</span>
<span class="token comment"># approach 3</span>
smoothed_gray <span class="token operator">=</span> cv2<span class="token punctuation">.</span>boxFilter<span class="token punctuation">(</span>gray<span class="token punctuation">,</span> <span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">,</span> KERNEL_SIZE<span class="token punctuation">)</span>
</pre></li>
</ol>
<p>There are several types of kernels we can apply to achieve a blur filter on our image. The averaging filter method serves as a good introductory point because it is easy to intuit about, but it is good to know that <code>opencv</code> provides a collection of convenience functions, each being an implementation of some blurring filter. See <a href="#handy-kernels-for-image-processing">Handy kernels for image processing</a> for a list of smoothing kernels implemented in <code>opencv</code>.</p>
<h2 class="mume-header" id="role-in-convolutional-neural-networks">Role in Convolutional Neural Networks</h2>
<p>Earlier, it was said that kernels play an integral role in all modern convolutional neural network architectures. Using TensorFlow, one will rely on the <code>tf.nn.conv2d</code> function to perform a 2D convolution. The syntax looks like this:</p>
<pre data-role="codeBlock" data-info="py" class="language-python">tf<span class="token punctuation">.</span>nn<span class="token punctuation">.</span>conv2d<span class="token punctuation">(</span>
<span class="token builtin">input</span><span class="token punctuation">,</span>
<span class="token builtin">filter</span><span class="token punctuation">,</span>
strides<span class="token punctuation">,</span>
padding<span class="token punctuation">,</span>
use_cudnn_on_gpu<span class="token operator">=</span><span class="token boolean">None</span><span class="token punctuation">,</span>
data_format<span class="token operator">=</span><span class="token boolean">None</span><span class="token punctuation">,</span>
name<span class="token operator">=</span><span class="token boolean">None</span>
<span class="token punctuation">)</span>
</pre><p>Where:</p>
<ul>
<li><code>input</code> is assumed to be a tensor of shape <code>(batch, height, width, channels)</code> where <code>batch</code> is the number of images in a minibatch</li>
<li><code>filter</code> is a tensor of shape <code>(filter_height, filter_width, channels, out_channels)</code> that specifies the learnable weights for the nonlinear transformation learned in the convolutional kernel</li>
<li><code>strides</code> contains the filter strides and is a list of length 4 (one for each input dimension)</li>
<li><code>padding</code> determines whether the input tensors are padded (with extra zeros) to guarantee the output <em>from the convolutional layer</em> has the same shape as the input. <code>padding="SAME"</code> adds padding to the input and <code>padding="VALID"</code> results in no padding</li>
</ul>
<p>Worthy to note is that the <code>input</code> and <code>filter</code> parameters follow what we've implemented using <code>opencv</code> thus far. When we're applying a filter like the mean blur example earlier, we slide our kernel with a <code>stride</code> of 1. In TensorFlow code, we would have set <code>strides=[1,1,1,1]</code> such that the kernel would slide by 1 unit across all 4 dimensions (x, y, channel, and image index).</p>
<p>Example of a Convolutional Neural Network architecture<sup class="footnote-ref"><a href="#fn5" id="fnref5">[5]</a></sup>:<br>
<img src="assets/c6archit.png" alt></p>
<p>Notice from the image that the dimension of our output from the first convolution layer is smaller (28x28) than its input (32x32) when we perform the operation without padding. <code>C1</code> and <code>C3</code> are examples of this in the above illustration.</p>
<p>In <code>S1</code> and <code>S2</code>, we're applying a max-pooling filter to down-sample our image representation, allowing our network to learn the parameters from the higher-order representations in each region of the image. An example operation is depicted below:</p>
<p><img src="assets/c6pooling.png" alt></p>
<h2 class="mume-header" id="handy-kernels-for-image-processing">Handy Kernels for Image Processing</h2>
<ul>
<li>Averaging Filter: <code>cv2.blur(img, KERNEL_SIZE)</code>
<ul>
<li>As seen in <code>meanblur_03.py</code>, replace each pixel with the <strong>mean</strong> of its neighboring pixels</li>
</ul>
</li>
<li>Median Filter: <code>cv2.medianBlur(img, KERNEL_SIZE)</code>
<ul>
<li>Replace each pixel with the <strong>median</strong> of its neighboring pixels</li>
</ul>
</li>
<li>Gaussian Filter: <code>cv2.GaussianBlur(img, KERNEL_SIZE, 0)</code></li>
<li>Bilateral Filter: <code>cv2.bilateralFilter(img, d, sigmaColor, sigmaSpace)</code>
<ul>
<li>An edge-preserving smoothing that aims to keep edges sharp</li>
</ul>
</li>
</ul>
<h4 class="mume-header" id="gaussian-filtering">Gaussian Filtering</h4>
<p>The Gaussian filter deserves its own section given its prevalence in image processing. It is achieved by convolving each point in the input array (read: each pixel in our image) with a <em>Gaussian kernel</em> and taking the sum to produce the output array.</p>
<p>If you remember your lessons from statistics, you may recall a 1D gaussian distribution looks like this:<br>
<img src="assets/normaldist.png" style="width: 50%; margin-left:20%;"></p>
<p>For completeness' sake, the code to graph the distribution above is in <code>utils/gaussiancurve.r</code>.</p>
<p>For a 1-dimensional image, the pixel located in the middle would be assigned the largest weight, with the weight of its neighbours decreasing as the spatial distance between them and the center pixel increases.</p>
<p>For the mathematically inclined, the graphed distribution above is generated from the Gaussian function<sup class="footnote-ref"><a href="#fn6" id="fnref6">[6]</a></sup>:</p>
<p></p><div class="mathjax-exps">$$g(x) = e^{\frac{-x^2}{2\sigma^2}}$$</div><p></p>
<p>Where <span class="mathjax-exps">$x$</span> is the spatial distance between the center pixel and the corresponding neighbor unit.</p>
<p>For a 1D kernel of size 7, each pixel would therefore be weighted accordingly:</p>
<p></p><div class="mathjax-exps">$$g(x) = \begin{bmatrix}.011 & .135 & .607 & 1 & .607 & .135 & .011\end{bmatrix}$$</div><p></p>
<p>The above should not be hard to intuit about: if we refer back to the graphed distribution, we can see that at the center pixel (at position x=0), <span class="mathjax-exps">$g(x)$</span> evaluates to a value of <span class="mathjax-exps">$1$</span>.</p>
<pre data-role="codeBlock" data-info="py" class="language-python"><span class="token keyword">import</span> numpy <span class="token keyword">as</span> np
weights <span class="token operator">=</span> <span class="token punctuation">[</span><span class="token punctuation">]</span>
sd <span class="token operator">=</span> <span class="token number">1</span>
<span class="token keyword">for</span> i <span class="token keyword">in</span> <span class="token builtin">range</span><span class="token punctuation">(</span><span class="token number">4</span><span class="token punctuation">)</span><span class="token punctuation">:</span>
weights <span class="token operator">+=</span> <span class="token punctuation">[</span>np<span class="token punctuation">.</span><span class="token builtin">round</span><span class="token punctuation">(</span>np<span class="token punctuation">.</span>exp<span class="token punctuation">(</span><span class="token punctuation">(</span><span class="token operator">-</span>i<span class="token operator">**</span><span class="token number">2</span><span class="token punctuation">)</span><span class="token operator">/</span><span class="token punctuation">(</span><span class="token number">2</span><span class="token operator">*</span>sd<span class="token operator">**</span><span class="token number">2</span><span class="token punctuation">)</span><span class="token punctuation">)</span><span class="token punctuation">,</span><span class="token number">3</span><span class="token punctuation">)</span><span class="token punctuation">]</span>
<span class="token keyword">print</span><span class="token punctuation">(</span>weights<span class="token punctuation">)</span>
<span class="token comment"># output:</span>
<span class="token comment"># [1.0, 0.607, 0.135, 0.011]</span>
</pre><p>For a 2D kernel, the formula would take the form of:<br>
</p><div class="mathjax-exps">$$g(x,y) = e^{\frac{-(x^2+y^2)}{2\sigma^2}}$$</div><p></p>
<p>When we compare the output of a mean filter to a gaussian filter, as in the example script in <code>gaussianblur_01.py</code>, we can then observe the difference in output visually:</p>
<p><img src="assets/meanvsgaussian.png" alt></p>
<p>This should also come as little surprise, since the mean filter simply replaces each pixel with the average value of its neighboring pixels, essentially giving a coefficient of 1 (before normalization) to a grid of 5x5 pixels.</p>
<p>Where on the other hand, gaussian filters <strong>weigh pixels using a gaussian distribution</strong> (think: bell curve in a 2d space) around the center pixel such that farther pixels are given a lower coefficient than nearer ones.</p>
<h4 class="mume-header" id="sharpening-kernels">Sharpening Kernels</h4>
<p>The opposite of blurring would be sharpening. There are again several approaches to this, and we'll start by looking at specifically two of them.</p>
<p>The first approach relies on the familiar <code>cv2.filter2D()</code> function to perform the following kernel and is implemented in <code>sharpening_01.py</code>:<br>
</p><div class="mathjax-exps">$$K = \begin{bmatrix} -1 & -1 & -1 \\ -1 & 9 & -1 \\ -1 & -1 & -1 \end{bmatrix}$$</div><p></p>
<p>The outcome:<br>
<img src="assets/sharpen.png" alt></p>
<h5 class="mume-header" id="approximate-gaussian-kernel-for-sharpening">Approximate Gaussian Kernel for Sharpening</h5>
<p>We can apply the same principles behind a Gaussian kernel for sharpening operations (as opposed to blurring). The full script is in <code>sharpening_02.py</code> but the essential parts are as follow:</p>
<pre data-role="codeBlock" data-info="py" class="language-python">approx_gaussian <span class="token operator">=</span> <span class="token punctuation">(</span>
np<span class="token punctuation">.</span>array<span class="token punctuation">(</span>
<span class="token punctuation">[</span>
<span class="token punctuation">[</span><span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">,</span> <span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">,</span> <span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">,</span> <span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">,</span> <span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">]</span><span class="token punctuation">,</span>
<span class="token punctuation">[</span><span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">,</span> <span class="token number">2</span><span class="token punctuation">,</span> <span class="token number">2</span><span class="token punctuation">,</span> <span class="token number">2</span><span class="token punctuation">,</span> <span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">]</span><span class="token punctuation">,</span>
<span class="token punctuation">[</span><span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">,</span> <span class="token number">2</span><span class="token punctuation">,</span> <span class="token number">8</span><span class="token punctuation">,</span> <span class="token number">2</span><span class="token punctuation">,</span> <span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">]</span><span class="token punctuation">,</span>
<span class="token punctuation">[</span><span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">,</span> <span class="token number">2</span><span class="token punctuation">,</span> <span class="token number">2</span><span class="token punctuation">,</span> <span class="token number">2</span><span class="token punctuation">,</span> <span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">]</span><span class="token punctuation">,</span>
<span class="token punctuation">[</span><span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">,</span> <span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">,</span> <span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">,</span> <span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">,</span> <span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">]</span><span class="token punctuation">,</span>
<span class="token punctuation">]</span>
<span class="token punctuation">)</span><span class="token operator">/</span> <span class="token number">8.0</span>
<span class="token punctuation">)</span>
sharpen_col <span class="token operator">=</span> cv2<span class="token punctuation">.</span>filter2D<span class="token punctuation">(</span>img<span class="token punctuation">,</span> <span class="token operator">-</span><span class="token number">1</span><span class="token punctuation">,</span> approx_gaussian<span class="token punctuation">)</span>
</pre><p>Notice how this method uses an approximate Gaussian kernel and that the result is an overall more natural smoothing:<br>
<img src="assets/gaussiansharpen.png" alt></p>
<h5 class="mume-header" id="unsharp-masking">Unsharp Masking</h5>
<p>The second approach is known as "unsharp masking", derived from the fact that the technique uses a blurred, or "unsharp", negative image to create a mask of the original image<sup class="footnote-ref"><a href="#fn7" id="fnref7">[7]</a></sup>. This technique is one of the oldest tools in photographic processing (tracing back to the 1930s) and popular tools such as Adobe Photoshop and GIMP have direct implementations of it named, appropriately, Unsharp Mask.</p>
<p>Lifted straight from the Wikipedia article itself, a "typical blending formula for unsharp masking is <strong>sharpened = original + (original - blurred) * amount</strong>". <strong>Amount</strong> represents how much contrast is added to the edges.</p>
<p>To rewrite the formula, we get:<br>
</p><div class="mathjax-exps">$$\begin{aligned} Sharpened & = O + (O-B) \cdot a \\ & = O + Oa - Ba \\ & = O (1+a) + B(-a)\end{aligned}$$</div><p></p>
<p>Where <span class="mathjax-exps">$a$</span> is the amount, <span class="mathjax-exps">$B$</span> is the blurred image (mask) and <span class="mathjax-exps">$O$</span> is the original image. The final form is convenient because we can plug it into <code>cv2.addWeighted</code> and get an output. From OpenCV's documentation, the function <code>addWeighted</code> calculates the weighted sum of two arrays as follows:<br>
</p><div class="mathjax-exps">$$dst(I) = saturate(src1(I) * alpha + src2(I) * beta + gamma)$$</div><p></p>
<p>When you perform the arithmetic above, you will find that some values may fall outside the range of 0 to 255 (e.g. <code>src1(I) * alpha</code> will produce values greater than 255 when alpha is > 1.5). Saturation clips the value in a way that is synonymous to the following:</p>
<p></p><div class="mathjax-exps">$$Saturate(x) = min(max(round(x), 0), 255)$$</div><p></p>
<p>The following code demonstrates the unsharp masking technique:</p>
<pre data-role="codeBlock" data-info="py" class="language-python">img <span class="token operator">=</span> cv2<span class="token punctuation">.</span>imread<span class="token punctuation">(</span><span class="token string">"assets/sarpi.png"</span><span class="token punctuation">)</span>
amt <span class="token operator">=</span> <span class="token number">1.5</span>
blurred <span class="token operator">=</span> cv2<span class="token punctuation">.</span>GaussianBlur<span class="token punctuation">(</span>img<span class="token punctuation">,</span> <span class="token punctuation">(</span><span class="token number">5</span><span class="token punctuation">,</span><span class="token number">5</span><span class="token punctuation">)</span><span class="token punctuation">,</span> <span class="token number">10</span><span class="token punctuation">)</span>
unsharp <span class="token operator">=</span> cv2<span class="token punctuation">.</span>addWeighted<span class="token punctuation">(</span>img<span class="token punctuation">,</span> <span class="token number">1</span><span class="token operator">+</span>amt<span class="token punctuation">,</span> blurred<span class="token punctuation">,</span> <span class="token operator">-</span>amt<span class="token punctuation">,</span> <span class="token number">0</span><span class="token punctuation">)</span>
unsharp_manual <span class="token operator">=</span> np<span class="token punctuation">.</span>clip<span class="token punctuation">(</span>img <span class="token operator">*</span> <span class="token punctuation">(</span><span class="token number">1</span><span class="token operator">+</span>amt<span class="token punctuation">)</span> <span class="token operator">+</span> blurred <span class="token operator">*</span> <span class="token punctuation">(</span><span class="token operator">-</span>amt<span class="token punctuation">)</span><span class="token punctuation">,</span> <span class="token number">0</span><span class="token punctuation">,</span> <span class="token number">255</span><span class="token punctuation">)</span>
cv2<span class="token punctuation">.</span>imshow<span class="token punctuation">(</span><span class="token string">"Unsharp Masking"</span><span class="token punctuation">,</span> unsharp<span class="token punctuation">)</span>
</pre><p><img src="assets/unsharpsarpi.png" alt><br>
You can find the sample code for this in <code>unsharpmask_01.py</code> (using <code>addWeighted</code>) and in <code>unsharpmask_02.py</code> (manual calculation) respectively.</p>
<h2 class="mume-header" id="summary-and-key-points">Summary and Key Points</h2>
<p>Why go to such lengths on the mathematical ideas behind image filtering operations?</p>
<blockquote>
<p>Filtering is perhaps the most fundamental operation of image processing and computer vision. In the broadest sense of the term "filtering", the value of the filtered image at a given location is a function of the values of the input image in a small neighborhood of the same location.<sup class="footnote-ref"><a href="#fn8" id="fnref8">[8]</a></sup></p>
</blockquote>
<p>It is fundamental to a host of common image processing techniques, from enhancements (sharpening, denoise, increase / reduce contrast), to edge detection, and texture detection, and in the case of deep learning, feature detections.</p>
<p>To help with your recall, I made a simple illustration below:</p>
<p><img src="assets/gaussiankernel.png" alt></p>
<p>Whenever you're ready, move on to <code>edgedetect.md</code> to learn the essentials of edge detection using kernel operations.</p>
<h2 class="mume-header" id="references">References</h2>
<hr class="footnotes-sep">
<section class="footnotes">
<ol class="footnotes-list">
<li id="fn1" class="footnote-item"><p>Making your own linear filters, <a href="https://docs.opencv.org/2.4/doc/tutorials/imgproc/imgtrans/filter_2d/filter_2d.html">OpenCV Documentation</a> <a href="#fnref1" class="footnote-backref">↩︎</a></p>
</li>
<li id="fn2" class="footnote-item"><p>Bradski, Kaehler, Learning OpenCV <a href="#fnref2" class="footnote-backref">↩︎</a></p>
</li>
<li id="fn3" class="footnote-item"><p>Stacks Exchange, <a href="https://stats.stackexchange.com/a/366940">https://stats.stackexchange.com/a/366940</a> <a href="#fnref3" class="footnote-backref">↩︎</a></p>
</li>
<li id="fn4" class="footnote-item"><p><a href="http://docs.opencv.org/modules/imgproc/doc/filtering.html#filter2d">OpenCV Documentation</a> <a href="#fnref4" class="footnote-backref">↩︎</a></p>
</li>
<li id="fn5" class="footnote-item"><p>R.Zadeh and B.Ramsundar, TensorFlow for Deep Learning, O'Reilly Media <a href="#fnref5" class="footnote-backref">↩︎</a></p>
</li>
<li id="fn6" class="footnote-item"><p>Wikipedia, Gaussian function, <a href="https://en.wikipedia.org/wiki/Gaussian_function">https://en.wikipedia.org/wiki/Gaussian_function</a> <a href="#fnref6" class="footnote-backref">↩︎</a></p>
</li>
<li id="fn7" class="footnote-item"><p>W.Fulton, A few scanning tips, Sharpening - Unsharp Mask <a href="#fnref7" class="footnote-backref">↩︎</a></p>
</li>
<li id="fn8" class="footnote-item"><p>C. Tomasi and R. Manduchi, "Bilateral Filtering for Gray and Color Images", Proceedings of the 1998 IEEE International Conference on Computer Vision, Bombay, India. <a href="#fnref8" class="footnote-backref">↩︎</a></p>
</li>
</ol>
</section>
</div>
</div>
<div class="md-sidebar-toc"><ul>
<li><a href="#kernels">Kernels</a>
<ul>
<li><a href="#definition">Definition</a>
<ul>
<li><a href="#mathematical-definitions">Mathematical Definitions</a>
<ul>
<li><a href="#a-note-on-padding">A Note on Padding</a>
<ul>
<li><a href="#dive-deeper">Dive Deeper</a></li>
</ul>
</li>
</ul>
</li>
</ul>
</li>
<li><a href="#smoothing-and-blurring">Smoothing and Blurring</a>
<ul>
<li><a href="#code-illustrations-mean-filtering">Code Illustrations: Mean Filtering</a></li>
</ul>
</li>
<li><a href="#role-in-convolutional-neural-networks">Role in Convolutional Neural Networks</a></li>
<li><a href="#handy-kernels-for-image-processing">Handy Kernels for Image Processing</a>
<ul>
<li><a href="#gaussian-filtering">Gaussian Filtering</a></li>
<li><a href="#sharpening-kernels">Sharpening Kernels</a>
<ul>
<li><a href="#approximate-gaussian-kernel-for-sharpening">Approximate Gaussian Kernel for Sharpening</a></li>
<li><a href="#unsharp-masking">Unsharp Masking</a></li>
</ul>
</li>
</ul>
</li>
<li><a href="#summary-and-key-points">Summary and Key Points</a></li>
<li><a href="#references">References</a></li>
</ul>
</li>
</ul>
</div>
<a id="sidebar-toc-btn">≡</a>
<script>
// Toggle the sidebar table of contents when the "≡" button is clicked.
// Visibility is driven by the presence of the `html-show-sidebar-toc`
// attribute on <body>, which the exported stylesheet keys off.
var sidebarTOCBtn = document.getElementById('sidebar-toc-btn')
sidebarTOCBtn.addEventListener('click', function (event) {
  // Prevent the click from bubbling up to document-level handlers.
  event.stopPropagation()
  var body = document.body
  var attr = 'html-show-sidebar-toc'
  if (body.hasAttribute(attr)) {
    body.removeAttribute(attr)
  } else {
    body.setAttribute(attr, true)
  }
})
</script>
</body></html>