 </li>

 <li class="md-nav__item">
-  <a href="#2023-schedule" class="md-nav__link">
-    2023 Schedule
+  <a href="#2024-schedule" class="md-nav__link">
+    2024 Schedule
   </a>

 </li>
@@ -443,7 +443,7 @@ <h2 id="syllabus">Syllabus<a class="headerlink" href="#syllabus" title="Permanen
 able to understand recent literature in deep learning, implement novel neural
 network architectures, use and understand the PyTorch library in many ways, and
 apply deep learning to different domains.</p>
-<h2 id="2023-schedule">2023 Schedule<a class="headerlink" href="#2023-schedule" title="Permanent link">🔗</a></h2>
+<h2 id="2024-schedule">2024 Schedule<a class="headerlink" href="#2024-schedule" title="Permanent link">🔗</a></h2>
 <table>
 <thead>
 <tr>
@@ -454,45 +454,50 @@ <h2 id="2023-schedule">2023 Schedule<a class="headerlink" href="#2023-schedule"
 </thead>
 <tbody>
 <tr>
-<td>28/11</td>
+<td>12/11</td>
 <td><a href="ANN.html">Artificial Neural Networks</a></td>
 <td>ANNs, backpropagation, Stochastic Gradient Descent</td>
 </tr>
 <tr>
-<td>29/11</td>
+<td>12/11</td>
 <td><a href="deep.html">Deep Learning</a></td>
 <td>layers, convolution, architectures, training</td>
 </tr>
 <tr>
-<td>05/12</td>
+<td>26/11</td>
 <td><a href="vision.html">Deep Learning for Computer Vision, pt 1</a></td>
 <td>Convolutional Neural Networks, satellite imagery</td>
 </tr>
 <tr>
-<td>05/12</td>
+<td>26/11</td>
 <td><a href="vision.html">Deep Learning for Computer Vision, pt 2</a></td>
 <td></td>
 </tr>
 <tr>
-<td>12/12</td>
-<td><a href="GAN.html">GANs</a></td>
+<td>03/12</td>
+<td><a href="GAN.html">Image generation</a></td>
 <td>VAEs, GANs, and Diffusion Models</td>
 </tr>
 <tr>
-<td>19/12</td>
+<td>09/12</td>
 <td><a href="RNN.html">RNNs</a></td>
 <td>Recurrent Neural Networks, LSTM, GRU</td>
 </tr>
 <tr>
-<td>19/12</td>
-<td><a href="NLP.html">NLP</a></td>
-<td>Natural Language Processing, Transformers</td>
+<td>10/12</td>
+<td><a href="NLP.html">Transformers</a></td>
+<td>Transformers</td>
 </tr>
 <tr>
-<td>09/01</td>
+<td>17/12</td>
 <td><a href="DR.html">Dimensionality Reduction</a></td>
 <td>Autoencoders, t-SNE</td>
 </tr>
+<tr>
+<td>17/12</td>
+<td><a href="NLP.html">LLMs</a></td>
+<td>Large Language Models</td>
+</tr>
 </tbody>
 </table>
 
0 commit comments