<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
  <channel>
    <title>Deep-Learning on Victor</title>
    <link>https://nyxox-debug.github.io/nyxox/tags/deep-learning/</link>
    <description>Recent content in Deep-Learning on Victor</description>
    <generator>Hugo</generator>
    <language>en-us</language>
    <lastBuildDate>Tue, 24 Feb 2026 00:00:00 +0000</lastBuildDate>
    <atom:link href="https://nyxox-debug.github.io/nyxox/tags/deep-learning/index.xml" rel="self" type="application/xml" />
    <item>
      <title>Building a Neural Network Framework from Scratch</title>
      <link>https://nyxox-debug.github.io/nyxox/posts/projects/deep-learning-framework-scratch/</link>
      <pubDate>Tue, 24 Feb 2026 00:00:00 +0000</pubDate>
      <guid>https://nyxox-debug.github.io/nyxox/posts/projects/deep-learning-framework-scratch/</guid>
      <description>&lt;p&gt;I use PyTorch every day at work. It&amp;rsquo;s incredible - but I&amp;rsquo;ve always wondered: how does it work under the hood? How does &lt;code&gt;backward()&lt;/code&gt; actually compute gradients through a neural network?&lt;/p&gt;&#xA;&lt;p&gt;So I built my own minimal deep learning framework called &lt;strong&gt;Synap&lt;/strong&gt;. It&amp;rsquo;s written in C++ for performance, with Python bindings via pybind11. No external ML libraries - just raw tensor operations and automatic differentiation from scratch.&lt;/p&gt;
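&#xA;&lt;p&gt;To make the idea concrete, here is a minimal sketch of the reverse-mode autodiff that a &lt;code&gt;backward()&lt;/code&gt; call performs, shown in Python for brevity. The &lt;code&gt;Value&lt;/code&gt; class and its methods are illustrative only, not Synap&amp;rsquo;s actual API: each operation records its parents and a local gradient rule, and &lt;code&gt;backward()&lt;/code&gt; replays those rules in reverse topological order.&lt;/p&gt;&#xA;&lt;pre&gt;&lt;code&gt;# Minimal sketch of reverse-mode autodiff on scalars.
# Illustrative only; Synap itself works on C++ tensors, not this class.
class Value:
    def __init__(self, data, parents=()):
        self.data = data
        self.grad = 0.0
        self._parents = parents
        self._backward = lambda: None

    def __add__(self, other):
        out = Value(self.data + other.data, (self, other))
        def backward_fn():
            # d(out)/d(self) = 1 and d(out)/d(other) = 1
            self.grad += out.grad
            other.grad += out.grad
        out._backward = backward_fn
        return out

    def __mul__(self, other):
        out = Value(self.data * other.data, (self, other))
        def backward_fn():
            # product rule: each input scales by the other input
            self.grad += other.data * out.grad
            other.grad += self.data * out.grad
        out._backward = backward_fn
        return out

    def backward(self):
        # topologically sort the graph, then apply the chain rule in reverse
        order, seen = [], set()
        def visit(v):
            if v not in seen:
                seen.add(v)
                for p in v._parents:
                    visit(p)
                order.append(v)
        visit(self)
        self.grad = 1.0
        for v in reversed(order):
            v._backward()

x, y = Value(2.0), Value(3.0)
z = x * y + x
z.backward()
print(x.grad, y.grad)  # 4.0 2.0, since dz/dx = y + 1 and dz/dy = x
&lt;/code&gt;&lt;/pre&gt;</description>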
    </item>
  </channel>
</rss>
