Mirror of https://github.com/itme-brain/itme-brain.github.io.git, synced 2026-05-08 08:00:13 -04:00

Compare commits: 6 commits, f214ff3ada ... 27d3f85728

| SHA1 |
|---|
| 27d3f85728 |
| 89d37eb5a9 |
| d01e601dc8 |
| d451f64428 |
| 13f4b571c5 |
| b24921804a |
30 changed files with 1352 additions and 253 deletions

.gitignore (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
.jekyll-cache
_site

CNAME (4 changed lines)
@@ -1,3 +1 @@
-ramos.codes
-www.ramos.codes
-bryan.ramos.codes
+ramos.codes

_config.yml (12 changed lines)
@@ -1 +1,11 @@
-include: [".well-known"]
+title: Bryan Ramos
+description: Software & Systems Engineer
+url: "https://ramos.codes"
+
+include:
+- ".well-known"
+
+permalink: /blog/:year/:month/:day/:title/
+
+markdown: kramdown
+highlighter: rouge

_includes/footer.html (new file, 4 lines)
@@ -0,0 +1,4 @@
<footer>
  <a href="https://git.ramos.codes" title="Forgejo"><i class="fa-solid fa-code-branch"></i></a>
  <a href="{{ '/feed.xml' | relative_url }}" title="RSS Feed"><i class="fa-solid fa-rss"></i></a>
</footer>

_includes/head.html (new file, 40 lines)
@@ -0,0 +1,40 @@
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">

<title>{% if page.title %}{{ page.title }} | {{ site.title }}{% else %}{{ site.title }} | {{ site.description }}{% endif %}</title>
<meta name="description" content="{% if page.description %}{{ page.description }}{% else %}{{ site.description }}{% endif %}">
<meta name="author" content="{{ site.title }}">
<link rel="canonical" href="{{ page.url | absolute_url }}">
<meta name="theme-color" content="#171e26">

<!-- Open Graph / LinkedIn / Facebook -->
<meta property="og:type" content="{% if page.layout == 'post' %}article{% else %}website{% endif %}">
<meta property="og:site_name" content="{{ site.title }}">
<meta property="og:url" content="{{ page.url | absolute_url }}">
<meta property="og:title" content="{% if page.title %}{{ page.title }}{% else %}{{ site.title }}{% endif %}">
<meta property="og:description" content="{% if page.description %}{{ page.description }}{% else %}{{ site.description }}{% endif %}">
<meta property="og:image" content="{{ '/assets/headshot.jpeg' | absolute_url }}">

<!-- Twitter -->
<meta name="twitter:card" content="summary">
<meta name="twitter:title" content="{% if page.title %}{{ page.title }}{% else %}{{ site.title }}{% endif %}">
<meta name="twitter:description" content="{% if page.description %}{{ page.description }}{% else %}{{ site.description }}{% endif %}">
<meta name="twitter:image" content="{{ '/assets/headshot.jpeg' | absolute_url }}">

<!-- Theme script: runs before CSS to prevent flash of wrong theme -->
<script>
  (function() {
    var stored = localStorage.getItem('theme');
    var prefersDark = window.matchMedia('(prefers-color-scheme: dark)').matches;
    document.documentElement.setAttribute('data-theme', stored || (prefersDark ? 'dark' : 'light'));
  })();
</script>

<!-- Google Fonts: Inter -->
<link rel="preconnect" href="https://fonts.googleapis.com">
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
<link rel="stylesheet" href="https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600&display=swap">

<link rel="stylesheet" href="{{ '/css/styles.css' | relative_url }}">
<script src="https://kit.fontawesome.com/f26d369dc4.js" crossorigin="anonymous"></script>

_includes/header.html (new file, 33 lines)
@@ -0,0 +1,33 @@
<header>
  {% unless page.url == '/' %}
  <a href="{{ '/' | relative_url }}" class="home-btn" aria-label="Home" title="Home">
    <i class="fa-solid fa-house"></i>
  </a>
  {% endunless %}

  <button
    class="theme-toggle"
    onclick="(function(){
      var next = document.documentElement.getAttribute('data-theme') === 'light' ? 'dark' : 'light';
      document.documentElement.setAttribute('data-theme', next);
      localStorage.setItem('theme', next);
      document.documentElement.classList.add('theme-transitions-enabled');
    })()"
    aria-label="Toggle color theme"
    title="Toggle color theme"
  >
    <i class="fa-solid fa-moon icon-moon"></i>
    <i class="fa-solid fa-sun icon-sun"></i>
  </button>

  <img src="{{ '/assets/headshot.jpeg' | relative_url }}" alt="Bryan Ramos" class="pfp">
  <h1>Bryan Ramos</h1>
  <p class="header-tagline">Software & Systems Engineer</p>

  <ul class="nav-links">
    <li><a href="mailto:bryan@ramos.codes"><i class="fa-solid fa-envelope"></i><span>Email</span></a></li>
    <li><a href="https://github.com/itme-brain"><i class="fa-brands fa-github"></i><span>GitHub</span></a></li>
    <li><a href="https://www.linkedin.com/in/bryan-ramos-ab467228a/"><i class="fa-brands fa-linkedin-in"></i><span>LinkedIn</span></a></li>
    <li><a href="{{ '/blog' | relative_url }}"><i class="fa-solid fa-pen-nib"></i><span>Blog</span></a></li>
  </ul>
</header>

_layouts/base.html (new file, 11 lines)
@@ -0,0 +1,11 @@
<!DOCTYPE html>
<html lang="en">
  <head>
    {% include head.html %}
  </head>
  <body>
    {% include header.html %}
    {{ content }}
    {% include footer.html %}
  </body>
</html>

_layouts/blog.html (new file, 18 lines)
@@ -0,0 +1,18 @@
---
layout: base
---
<div class="blog-list">
  {% if site.posts.size == 0 %}
  <p style="color: var(--color-text-secondary);">No posts yet.</p>
  {% else %}
  {% for post in site.posts %}
  <div class="post-preview">
    <span class="post-date">{{ post.date | date: "%B %-d, %Y" }}</span>
    <a href="{{ post.url | relative_url }}">{{ post.title }}</a>
    {% if post.description %}
    <p class="post-description">{{ post.description }}</p>
    {% endif %}
  </div>
  {% endfor %}
  {% endif %}
</div>

_layouts/page.html (new file, 4 lines)
@@ -0,0 +1,4 @@
---
layout: base
---
{{ content }}

_layouts/post.html (new file, 15 lines)
@@ -0,0 +1,15 @@
---
layout: base
---
<article class="post">
  <header class="post-header">
    <h1>{{ page.title }}</h1>
    <time class="post-date" datetime="{{ page.date | date: '%Y-%m-%d' }}">{{ page.date | date: "%B %-d, %Y" }}</time>
  </header>
  <div class="post-content">
    {{ content }}
  </div>
  <footer class="post-nav">
    <a href="{{ '/blog' | relative_url }}">← Back to Blog</a>
  </footer>
</article>

_posts/2023-02-28-setting-up-pgp.md (new file, 56 lines)
@@ -0,0 +1,56 @@
---
layout: post
title: "Setting Up PGP: Why Encrypted Communication Still Matters"
date: 2023-02-28
description: "A practical look at PGP, why I finally got around to setting up my own key, and why you probably should too."
tags: [security, pgp, privacy]
---

I've been putting off setting up a PGP key for years. It always felt like one of those things that lived in the intersection of "I should do this" and "how many people actually email me sensitive information?" The answer to the second question is not many, but that's kind of beside the point.

This week I finally did it, published my public key on this site, and wanted to write down why I think it still matters in 2023.

## What PGP Actually Does

PGP (Pretty Good Privacy) is an encryption standard that lets two people communicate privately, even over completely untrusted channels like email. The core idea is simple: you generate a key pair. One key is public, and you give it to anyone who might want to send you something private. The other key is private and never leaves your machine.

When someone wants to send you an encrypted message, they use your public key to lock it. Only your private key can open it. No server in the middle, no account needed, no company storing your messages.

It also works in reverse for signatures. You can sign something with your private key, and anyone with your public key can verify the signature came from you. This is useful for verifying software downloads, signing git commits, or just proving a message wasn't tampered with.
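
In practice that's one command in each direction. A minimal sketch, assuming a file named `message.txt`:

```bash
# Produce message.txt.asc: the text plus an inline signature
gpg --clearsign message.txt

# Anyone holding your public key can check it
gpg --verify message.txt.asc
```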

## Why It Feels Abandoned (And Why That's Wrong)

The honest reason most people haven't set up PGP is that the tooling is rough and the UX has barely changed since the 90s. Keyservers are a mess, key discovery is unreliable, and the average person isn't going to paste an ASCII armor block into a web form to send their friend a message.

Signal exists. Matrix exists. These are genuinely better tools for real-time encrypted chat with non-technical people.

But email isn't going away, and there are specific situations where PGP is still the right tool. Submitting a security vulnerability report. Communicating with people who run their own mail servers. Signing software releases. Verifying the identity of someone you're meeting for the first time online.

The other thing PGP gives you that Signal doesn't is longevity and portability. Your key lives on your hardware. No company can deplatform you, no app can be removed from an app store, no terms of service can change. It's just math.

## Getting Started

The quickest path on Linux is through GPG:

```bash
gpg --full-generate-key
```

Choose RSA 4096 or Ed25519. Give it your email address. Set a passphrase. That's your key.

To export your public key so you can share it:

```bash
gpg --armor --export your@email.com
```

Paste that block on your website, upload it to keys.openpgp.org, or just email it to people. Once someone has your public key, they can encrypt messages that only you can read.

I've posted mine here: [PGP key](/assets/public.key)
If you're sending me anything sensitive, please use it.
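
On the sending side, that takes two commands. A sketch, assuming you saved the key as `public.key` and your message as `message.txt`:

```bash
# Import the public key into your keyring
gpg --import public.key

# Encrypt so only the key's owner can read it
gpg --encrypt --armor --recipient bryan@ramos.codes message.txt
```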

## One More Thing

Beyond privacy, generating your own key is a small but meaningful act of taking responsibility for your own security infrastructure. You're not trusting a company to hold your keys. You're not hoping an app stays funded and maintained. The cryptography has been audited for decades and it works.

That's worth the afternoon it takes to set up.

_posts/2023-04-08-why-im-paying-attention-to-nostr.md (new file, 41 lines)
@@ -0,0 +1,41 @@
---
layout: post
title: "Why I'm Paying Attention to Nostr"
date: 2023-04-08
description: "Nostr is rough around the edges and the user base is tiny. I'm watching it anyway."
tags: [nostr, decentralization, social-media, bitcoin]
---

I set up my Nostr identity a few weeks ago. It took longer than it should have, involved too many copy-pasted hex strings, and the client I ended up settling on still crashes occasionally. I'm writing about it anyway because I think the underlying idea is interesting enough to track.

## What Nostr Is

Nostr stands for "Notes and Other Stuff Transmitted by Relays." The name is intentionally generic because the protocol is intentionally minimal. At its core it's just a specification for cryptographically signed JSON messages that get broadcast to relay servers. Any client can connect to any relay. Any relay can store any message. The only identity system is a public/private key pair.

There's no company, no central server, no account to create. Your identity is your key. Your posts are signed by that key. Relays can choose what to store and what to drop, but they can't forge your signature or impersonate you.
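
To give a sense of how small the surface area is, here is the rough shape of a plain text note as NIP-01 defines it. The values are illustrative placeholders, not a real event:

```json
{
  "id": "<sha256 hash of the serialized event, hex>",
  "pubkey": "<author public key, hex>",
  "created_at": 1680912000,
  "kind": 1,
  "tags": [],
  "content": "hello nostr",
  "sig": "<schnorr signature over the id, hex>"
}
```

That is the whole unit of the protocol: clients sign these, relays store and forward them.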

## Why This Feels Different

I've been on Twitter since around 2010. I've watched it go through multiple cycles of policy changes, API lockdowns, and relationship rewrites between the company and its users. The pattern is always the same: the platform grows, the platform becomes valuable, the platform starts extracting value from users and developers who helped make it valuable.

Mastodon tried to solve this with federation. The problem is that federation still means trusting whoever runs your instance. You're one admin burnout or one bad actor away from your data disappearing or your account being frozen. The ActivityPub model also has a moderation coordination problem where instances end up playing a constant defederation game.

Nostr's approach is different. Because your identity is a key you control, no relay operator can take your identity away. You can switch relays, run your own relay, or use multiple relays simultaneously. The protocol is simple enough that there are now dozens of clients across every platform, and they all interoperate because they're all just reading and writing the same JSON format.

## The Honest Caveats

It's early and it shows. Key management is a real problem. If you lose your private key, you lose your identity and there's no recovery. Most clients have no good answer for this. The onboarding experience for non-technical users is rough.

The user base right now skews heavily toward Bitcoin and cypherpunk communities. That's fine as a starting point but it limits the conversational surface area considerably. If you're not interested in those topics, your feed is going to be thin.

Spam and discovery are unsolved problems. Without central moderation, finding good content requires knowing who to follow, and knowing who to follow requires already being embedded in communities that point you toward people worth following.

## Why I'm Still Here

Despite all that, I set up NIP-05 verification on this domain (which is why you can find me as `bryan@ramos.codes` on Nostr clients that support it) and I'm going to keep posting. The bet I'm making is that the protocol has the right properties to survive even if individual clients and relays come and go.
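
The NIP-05 piece is pleasingly boring: it is just a static JSON file served from the domain. Something like this at `/.well-known/nostr.json`, with the hex key as a placeholder:

```json
{
  "names": {
    "bryan": "<hex-encoded public key>"
  }
}
```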

The thing that keeps bringing me back is the simplicity. I can read the spec in an afternoon. I can run a relay on a small VPS. I could write a client in a weekend if I wanted to. That kind of simplicity is usually a durable property in protocols.
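
Concretely, a relay is just a WebSocket endpoint speaking JSON, so you can poke at one with a generic client. A sketch, assuming the `websocat` tool and a placeholder relay URL:

```bash
# Open a WebSocket connection to a relay
websocat wss://relay.example.com

# Then type a NIP-01 subscription request on stdin:
#   ["REQ","demo",{"kinds":[1],"limit":3}]
# The relay streams back ["EVENT","demo",{...}] messages,
# followed by ["EOSE","demo"] when stored events are exhausted.
```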

Whether it reaches critical mass or stays a small technical community, I don't know. But the design is right in ways that matter.

Find me on Nostr: `npub17374whevgs040xkd48gr99g0xmpxd9snqt57dsfvtp0jcjt8yjeq49rdyt`

_posts/2023-08-12-twitter-becomes-x.md (new file, 39 lines)
@@ -0,0 +1,39 @@
---
layout: post
title: "Twitter Becomes X: The Case for Open Protocols"
date: 2023-08-12
description: "Yesterday Twitter became X. It doesn't really matter, and that's the whole problem."
tags: [social-media, nostr, decentralization, twitter]
---

Yesterday Twitter became X. The bird is gone, the brand is gone, and a social network that defined a decade of internet culture is now named after the company that merged into PayPal back in 2000. The discourse around this has been predictably loud, but I want to talk about something that gets less attention: why it doesn't actually matter what they rename it, and why that's the most damning thing I can say about the whole situation.

## You Don't Own Your Presence There

When Twitter dies, or transforms into something unrecognizable, everyone who built an audience there discovers the same thing: they were building on rented land. The followers, the links pointing at their profile, the years of posts — none of it is theirs in any meaningful sense. The platform owns the relationships. The platform owns the distribution.

This is not a new observation. It's been said every time a major platform has pivoted, died, or changed its terms. MySpace, Tumblr, Vine, Google+. We watched it happen and we kept building on closed platforms anyway, because that's where the people were.

## What Actually Changed This Year

The rebrand is cosmetic. What's more significant is the systematic destruction of the API ecosystem. Third-party clients are gone. The API costs that killed them were not a mistake or a miscalculation. They were a policy decision to force everyone through official clients that can be monetized and monitored.

The rate limits on reading. The removal of the free tier. The requirement to pay for basic functionality that was free for a decade. Each of these individually could be explained away. Together they paint a clear picture of a platform that has decided its relationship with developers and power users is adversarial.

The people building on this platform subsidized its growth. They got API access and a network in return. That deal has been unilaterally cancelled.

## The Protocol Alternative

I've been on Nostr for a few months now. I wrote about it in April. It's still rough and the user base is still small, but this week it became significantly more relevant to me.

The difference is fundamental. On Nostr, I hold my own private key. My identity can't be revoked by a board decision or a new CEO. My posts are signed by me and can be verified by anyone without trusting a central server. If a relay goes down, I switch to another relay and my identity goes with me, intact.

This is what a protocol-based approach actually buys you. You're not at the mercy of a company's product roadmap. The clients and relays are interchangeable components. The thing that actually matters, the cryptographic identity and the content, lives with you.

## The Practical Reality

I'm not naive about where the people are. Most of the interesting conversations I've had online over the last decade happened on Twitter, and most of those people are not moving to Nostr anytime soon. Network effects are real and powerful and they don't care about your principles.

But I'm done optimizing my digital presence for platforms I don't control. X can do whatever it wants with its rebrand. I'll keep using it until I don't, and I won't build anything meaningful there that I'd be upset to lose.

The interesting question is what comes after centralized social media. I don't know if Nostr is the answer. But a signed JSON message sent to a relay you can run yourself is at least asking the right questions.

_posts/2024-06-15-real-time-linux.md (new file, 65 lines)
@@ -0,0 +1,65 @@
---
layout: post
title: "Real-Time Linux"
date: 2024-06-15
description: "Most Linux systems are good enough. Some systems have strict timing deadlines."
tags: [linux, real-time, systems, kernel]
---

When most people say a system is "fast," they mean it has high throughput. It processes a lot of data per second, or pages load quickly, or builds finish in under a minute. That's a useful property, and it's what most software optimization work is aimed at.

Real-time systems are optimizing for something different. They don't care as much about average performance. They care about the worst case. The guarantee they need is not "this will usually respond in 10ms" but "this will always respond in under 1ms, every single time, no exceptions."

That's a fundamentally harder problem, and it's one I work with regularly.

## The Problem with Standard Linux

Linux is a general-purpose operating system. Its scheduler is designed to give all processes a fair share of CPU time, to maximize throughput, and to stay responsive under heavy load. These are good goals for a desktop or a server.

The problem is that achieving fairness and throughput sometimes requires the kernel to hold up other work. Interrupt handlers, memory management, locking operations — the kernel has many internal paths that, in a standard build, cannot be interrupted. If your real-time process needs to run exactly now, but the kernel is in the middle of a non-preemptible section doing something else, your process waits. That wait is called scheduling latency, and in standard Linux it can spike unpredictably.

For a web server, a latency spike of 5ms is noise. For a control system running a simulation where a missed deadline means corrupted state, it's a failure.

## What PREEMPT_RT Changes

The PREEMPT_RT patch set, which has been in development since 2005 and was finally merged into the mainline kernel with v6.12 in late 2024, addresses this by making the kernel itself fully preemptible. The key changes:

**Threaded interrupts.** Instead of running interrupt handlers in a context that can't be preempted, PREEMPT_RT converts them to kernel threads that the scheduler can manage like any other thread. A high-priority real-time task can preempt an interrupt handler if needed.

**Priority-inheriting mutexes.** Standard Linux spinlocks can create priority inversion: a high-priority task waiting for a lock held by a low-priority task gets stuck behind everything the low-priority task gets preempted by. PREEMPT_RT replaces these with proper mutexes that raise the priority of the lock holder while it's blocking a higher-priority thread.

**Fully preemptible kernel paths.** The long non-preemptible sections in the kernel get eliminated or minimized, bounding the worst-case latency the scheduler can impose.

The result is a kernel where scheduling latency can be measured in microseconds rather than milliseconds, consistently, even under load.
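
The usual way to check that claim on a given box is `cyclictest` from the rt-tests suite. A minimal sketch; run it while the machine is under load and watch the max latency column:

```bash
# One measurement thread at SCHED_FIFO priority 80, waking every 1 ms,
# memory locked, 100,000 iterations
cyclictest --threads=1 --priority=80 --interval=1000 --loops=100000 --mlockall
```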

## Tuning Beyond the Kernel

The kernel is only part of the picture. Getting deterministic latency in production requires additional work:

**CPU isolation.** You dedicate specific cores to real-time tasks using the `isolcpus` boot parameter, removing them from the general scheduler pool. Background kernel threads, IRQ balancing, and RCU callbacks all get steered away from those cores. The real-time task gets CPU time without competition.

**Interrupt affinity.** Device interrupts get pinned to specific non-isolated cores so they don't interrupt your critical processes. You modify `/proc/irq/N/smp_affinity` directly, and usually disable `irqbalance` entirely, since it fights against your configuration.

**Memory locking.** Real-time processes call `mlockall()` at startup to prevent their memory from being paged out. A page fault at the wrong moment will blow your latency budget immediately.

**NUMA awareness.** On multi-socket systems, memory access times depend on which socket the memory is physically on relative to where the CPU is. Binding your real-time process to CPUs and memory on the same NUMA node eliminates a whole class of latency variance.
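
A sketch of how those pieces look in practice. The core numbers, the IRQ number, and the binary name are placeholders:

```bash
# Kernel command line (bootloader config): isolate cores 2-3,
# silence their tick, and offload their RCU callbacks
#   isolcpus=2,3 nohz_full=2,3 rcu_nocbs=2,3

# Pin IRQ 24 to CPU 0 (the value is a hex CPU mask), and stop
# irqbalance from moving it back
echo 1 > /proc/irq/24/smp_affinity
systemctl stop irqbalance

# Launch the task on isolated core 2 under SCHED_FIFO priority 80;
# the task itself should call mlockall() at startup
chrt --fifo 80 taskset -c 2 ./rt_task

# On a multi-socket box, also pin CPU and memory to one NUMA node
numactl --cpunodebind=0 --membind=0 chrt --fifo 80 ./rt_task
```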

## Hard vs. Soft Real-Time

Not everything that gets called "real-time" has the same requirements.

**Soft real-time** means deadlines are important and you try hard to meet them, but occasional misses are tolerable. Video playback is soft real-time. A dropped frame is annoying; it doesn't corrupt anything.

**Hard real-time** means missing a deadline is a system failure. Industrial control systems, flight simulation, certain medical devices. The system must guarantee deadline adherence, not just optimize for it statistically.

Most of what real-time Linux enables sits in the hard category. The industries that use it (aerospace, defense, industrial automation, simulation) aren't interested in "usually meets deadlines." They need provable bounds.

## Why This Is Getting More Relevant

The PREEMPT_RT merge into mainline is a significant milestone. For years, using real-time Linux meant carrying an out-of-tree patch set and rebuilding the kernel yourself, which made it harder to maintain and harder to justify in organizations with conservative change management policies.

With real-time capability in the upstream kernel, distributions can ship it as a supported configuration. Red Hat already has RHEL for Real Time. The barrier to adopting real-time Linux in environments that need it keeps dropping.

The systems that need deterministic behavior aren't going away either. Autonomous vehicles, robotics, increasingly complex simulation environments. If anything, the demand is growing.

Understanding what real-time actually means, and what it costs to achieve, is increasingly useful knowledge.

_posts/2025-11-16-why-nostr-never-took-off.md (new file, 51 lines)
@@ -0,0 +1,51 @@
---
layout: post
title: "Why Nostr Never Took Off"
date: 2025-11-16
description: "I was an early Nostr adopter. Two and a half years later, I find myself removing the link from my website after a long stretch of inactivity."
tags: [nostr, social-media, decentralization]
---

I wrote about Nostr in 2023, two and a half years ago. I was cautiously optimistic. The protocol was technically interesting, the key-based identity model was the right idea, and the community was small but engaged. I set up NIP-05 verification on my domain and started posting.

Today I removed the Nostr link from this website. Not because the protocol is gone, it isn't, but because I no longer think it's heading somewhere worth pointing people toward. I want to explain why.

## What It Became

The Nostr feed I checked daily in 2023 was predominantly Bitcoin content. That was expected given the origin story — Nostr grew out of the Bitcoin community, Jack Dorsey funded early development, and the initial network effect came from people already in that space.

What I didn't anticipate was how thoroughly that initial demographic would come to define everything about the platform. By 2024 the dominant conversation wasn't about decentralized protocols or censorship resistance in any general sense. It was a very specific cultural and political monoculture. Post something that didn't fit the consensus worldview and you'd find yourself deprioritized on every major client's algorithm, not through central moderation, but through the informal social mechanics of who runs the popular relays and which follows get amplified.

The "no censorship" promise turned out to mean something narrower: no censorship of the specific things the dominant community wanted to say. Everything else found itself slowly squeezed out, not by policy but by indifference and social pressure.

## The Usenet Problem

I've been thinking about how closely this maps to Usenet's decline. Usenet was technically elegant, decentralized before decentralization was a word people used, and destroyed by two things: spam and the collapse of signal-to-noise ratio. In that order.

The spam came first and was partially managed through killfiles and moderated groups. But the social problem was harder. Once a community scales past a certain point, the loudest and most persistent voices dominate regardless of quality. The thoughtful people leave because the ratio of effort to reward degrades. What's left is the people who are most motivated to keep posting, which tends to correlate with having the most extreme or commercially motivated things to say.

Nostr repeated this. The relay model doesn't solve discovery; it pushes it to clients and the social graph. If the social graph is already concentrated around a particular community, discovery just reinforces that concentration. Finding interesting people outside the dominant cluster requires already knowing who they are. The protocol has no answer for this.

## Technical Problems That Never Got Solved

The key management problem is still unsolved in any practical sense for average users. Lose your key, lose your identity. Every backup solution involves either trusting someone else with your key (which recreates the custodial problem you were trying to avoid) or being technically sophisticated enough to manage hardware security appropriately.

Relay sustainability never found a clean model. The paid relay experiment had some uptake, but the economics are genuinely difficult. Relays that charged too little couldn't cover costs. Relays that charged enough to be sustainable were too expensive for casual users. Free relays became spam sinks that degraded the experience for everyone.

Content discovery across relays remains primitive. The global feed on most clients is unusable noise. Finding good content still requires word of mouth in communities, which means good content in small or niche communities stays invisible.

## What the Protocol Got Right

I want to be fair. The cryptographic identity model is correct. Owning a keypair as your identity is the right design. The protocol simplicity that let dozens of clients get built is genuinely valuable. The fact that there are still active developers working on NIP extensions shows that the core idea has staying power.

The failure isn't that the protocol is wrong. The failure is that a technically correct protocol isn't sufficient to build a healthy social space. You also need thoughtful community cultivation, economic models for infrastructure, and some answer to the discovery problem that doesn't just replicate existing social hierarchies.

None of those are easy. Centralized platforms haven't solved them either; they've just hidden the problem behind algorithms tuned for engagement. But the Nostr community largely believed that solving the technical censorship problem would also solve the social problems, and that turned out not to be true.

## Where I Actually Am Now

I still believe in the principle. Cryptographic identity, open protocols, no single point of control. These are the right building blocks for communication infrastructure that can survive corporate capture.

But building on a protocol that has become an echo chamber isn't advancing those goals; it's just participating in a different kind of monoculture. I'll keep watching what happens with the underlying protocol work. If the community composition changes or the discovery problems get solved in a real way, I'll look again.

For now: the link is gone, the account still exists, and I'm still reachable at `bryan@ramos.codes`.

@@ -0,0 +1,85 @@
---
layout: post
title: "Why I'm Moving More AI Work Off the Cloud"
date: 2026-02-10
description: "Cloud AI is useful, but I want more of my day-to-day AI workflow on infrastructure I control."
tags: [ai, local-ai, privacy, security]
---

I have been moving more of my AI workflow onto hardware I control.

That does not mean I am done with cloud models. I still use them, and for some tasks they are clearly the best tool available. The frontier models are fast, capable, and convenient in a way that is hard to argue with.

But convenience is not the only thing I care about.

For a growing amount of my work, especially research, security work, and personal tooling, I want fewer external dependencies. I want to decide what context leaves my machine. I want tools that keep working when an API changes, a rate limit shows up, or a provider decides a workflow no longer fits neatly inside its acceptable-use boundaries.

That has pushed me toward a local-first AI setup: local models when they make sense, local search and retrieval, and a workstation built for experimenting without asking permission from someone else's platform.

## The Problem With Renting Every Thought

Cloud AI has a strange gravity to it. It is easy to start with one hosted model, one API key, and one chat window. Then slowly more of the workflow moves there. Notes, code, research questions, logs, documents, debugging sessions, threat models. The model gets better as it sees more context, so the incentive is always to give it more.

At some point the question changes from "is this useful?" to "how much of my working memory am I comfortable routing through a service I do not control?"

Sometimes the answer is: plenty. If I am asking a general programming question, summarizing public docs, or comparing technologies, the privacy concern is low. The cloud model is just a good tool.

Other times the answer is different. If I am working through security research, private notes, closed-source code, unfinished ideas, internal infrastructure details, or anything that would be awkward to paste into a public forum, I would rather keep the default path local.

This is less about paranoia than posture. I do not want to make a sensitive workflow depend on remembering, every single time, which context is safe to send somewhere else.

## Security Work Is Often Awkwardly Shaped

Security research creates a particular kind of friction with hosted models.

A lot of legitimate defensive work looks suspicious when reduced to a prompt. Understanding exploit chains, malware behavior, persistence mechanisms, credential abuse, phishing infrastructure, evasion techniques, and post-compromise behavior is necessary if you want to defend against those things. It is also dual-use by nature.

Cloud models often handle that ambiguity by refusing broadly. I understand why. Providers are operating at huge scale, they have to make conservative policy decisions, and they do not know who is asking or why.

But from the researcher's side, broad refusal can turn a useful assistant into a wall. The model does not need to help someone cause harm to be useful. It can help explain behavior, compare mitigations, reason through detections, review lab code, or identify what a suspicious artifact is trying to do.

For that kind of work, local models matter. Not because "uncensored" should mean irresponsible, but because security work needs room to discuss uncomfortable systems honestly. Running models locally puts the responsibility where it belongs: on the person operating the tool.

## Privacy Is Also About Drafts

People often talk about privacy as if it only matters for secrets: passwords, keys, customer data, proprietary documents. Those matter, obviously.

But drafts matter too.

Half-formed ideas, personal notes, research trails, failed experiments, and weird debugging paths say a lot about how someone thinks. They are not always sensitive in the legal sense, but they are still private. I want the freedom to explore messy ideas without turning every intermediate thought into data exhaust for a remote service.

Local AI makes that easier. I can point a model at notes, logs, repos, or experiments without first filtering everything through "would I be comfortable uploading this?"

That changes the feel of the tool. It becomes less like a web service and more like part of the machine.

## Local Models Are Not Magic

There are tradeoffs.

Local models are often slower. They can be less capable than the best hosted models. Hardware is expensive, loud, hot, and occasionally annoying. Running the stack yourself means you inherit the boring parts: drivers, model formats, disk space, memory pressure, cooling, updates, broken builds, and tools that almost work.

I do not think local AI replaces cloud AI for everything. That is not the point.

The point is to own the workflows where ownership matters. If a task needs the best reasoning model in the world, I may still use a cloud model. If a task needs privacy, repeatability, looser research constraints, or deeper integration with my local environment, I want a path that does not leave my hardware.

## Tools Matter As Much As Models

Running a model locally is only one piece of the problem.

A model sitting on a workstation still needs useful context. It needs current information, documentation, source material, and a way to inspect the web. Otherwise it becomes a very private but very stale assistant.

That is why I started building more of the surrounding tooling too. Local search. Local retrieval. Local reranking. Agent tools that do not need a paid search API every time they need to answer a grounded question.

The goal is not to rebuild the entire internet in my office. It is to make the common path private and inspectable: the model, the search layer, the retrieval pipeline, and the machine they run on.

## What I Want From This Stack

I want an AI setup that feels boring in the right ways.

I want to ask questions against my own notes without thinking about where the text is going. I want to run models that are useful for security research without fighting a policy layer designed for the most abusive possible user. I want web search that is cheap enough to use freely and transparent enough to debug when the results are bad.

Most of all, I want the stack to be mine. Not because every local component is better than every cloud service, but because control changes what you are willing to build.

When the infrastructure is yours, experiments get easier. Weird ideas get cheaper. Private workflows stay private by default. And when something breaks, you can open the box and see why.

That is the direction I want more of my AI work to move in.

_posts/2026-03-18-building-a-local-ai-rig.md (new file, 111 lines)
@@ -0,0 +1,111 @@
---
layout: post
title: "Building a Local AI Rig"
date: 2026-03-18
description: "The machine I built for local AI work, why I built it, and why good enough hardware changes how I use these tools."
tags: [ai, local-ai, hardware, rocm]
---

After deciding I wanted more of my AI workflow to run locally, the next question was what kind of machine would make that practical.

Not a benchmark trophy. Not a datacenter in a spare room. Just a workstation that could run useful models every day without making the whole thing feel like a science project.

The machine I ended up with is built around an AMD EPYC 7402P, 256 GB of RAM, a 2 TB NVMe drive, and an AMD Radeon AI PRO R9700 with 32 GB of VRAM. The board gives me real x16 PCIe slots too, which leaves room to add more GPUs later instead of rebuilding the whole machine around a dead-end platform. It runs Linux with ROCm, and ROCm has been good enough for the kind of AI work I care about.

That last part matters because a lot of local AI discussion still assumes NVIDIA is the only serious path. NVIDIA is clearly the easier default in many cases. CUDA support is better, more projects test against it first, and you run into fewer weird edges.

But "best supported" and "only workable" are not the same thing. For my use case, this AMD setup has been usable enough that I can stop thinking about the GPU most of the time and focus on the work.

## Buying Around the Hype

AI hardware pricing is strange right now.

The obvious parts are expensive because everyone wants them. New NVIDIA cards, especially the high-end ones, are priced like the market knows exactly how much demand local AI has created. I did not want to build the whole machine around paying hype-cycle prices for every component.

So the rest of the system is deliberately unglamorous: last-generation server-grade hardware that is still very capable. The EPYC platform gives me cores, memory capacity, PCIe lanes, and room to grow without paying workstation-fashion prices. It is not the newest thing, but it is exactly the kind of hardware that becomes interesting once it falls out of the datacenter upgrade cycle.

The GPU is the more deliberate bet. The Radeon AI PRO R9700 is a new card, but it was dramatically cheaper than chasing the top NVIDIA consumer cards. At roughly a fifth of the price of the NVIDIA 5090s I was seeing, the question became whether I believed the AMD software ecosystem would keep improving enough to make the tradeoff worth it.

So far, that bet looks reasonable. ROCm is not as polished as CUDA, but I am getting decent performance out of the card today. For my workloads, that matters more than having the most obvious logo on the box.

## What I Wanted From the Machine

The goal was not to beat hosted frontier models. A local workstation is not going to turn into an infinite cloud API just because it has a large GPU in it.

What I wanted was a default place to run private work.

I wanted to be able to test models without uploading notes, code, or research context to a third party. I wanted a box that could sit on my network and be available whenever I wanted to experiment. I wanted enough memory that large models, local search tools, indexing jobs, and normal development work would not constantly fight each other.

I also wanted the machine to be boring. There is a version of local AI where every session starts with debugging drivers, chasing library versions, or trying to remember which environment variables made something work last week. That gets old fast. The rig needed to be powerful enough to be useful, but stable enough to fade into the background.

## The Daily Models

The models I settled on for day-to-day use are Qwen3.6 27B and Qwen3.6 35B-A3B.

That has been a good balance for me. They are capable enough for coding help, research, summarization, and general reasoning without making every prompt feel painfully slow. They are also large enough that the local setup feels meaningfully different from running a tiny model just to prove that local inference works.

In normal use, I see roughly 25 tokens per second from the 27B model and around 60 tokens per second from the 35B-A3B model. Those are not formal benchmark numbers, but they are the numbers that matter for me: fast enough that I reach for the local models during the day instead of treating them like a novelty.
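
If you want repeatable numbers instead of eyeballed ones, llama.cpp ships a benchmarking tool. A sketch; the model path here is a placeholder:

```bash
# Reports prompt-processing and token-generation throughput
llama-bench -m ./models/qwen3.6-27b-q4_k_m.gguf
```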

That is the line I care about: not "can I technically run a model?" but "would I actually choose to use this?"

Plenty of local AI setups clear the first bar and fail the second. A model can be private, cheap per token, and fully under your control, but if it is too slow or too weak you eventually stop reaching for it. For local AI to matter, it has to become part of the normal workflow.

This machine gets close enough to that for me.

## Why So Much System RAM?

The 256 GB of RAM is not there because every model needs it. Most of the time, the GPU is the part people talk about, and for good reason. VRAM decides a lot about what can run comfortably.

But system RAM gives the machine breathing room.

It lets me keep larger models, caches, indexing jobs, containers, build trees, and other tools around without the machine feeling fragile. It also matters for experiments where not everything fits neatly in VRAM. Local AI is not just one process running one model. It tends to become a small pile of services: inference, search, retrieval, development tools, monitoring, and whatever else I am currently testing.

I did not want the box to be useful only when treated delicately.

## ROCm Has Been Good Enough

ROCm still has rough edges. I would not pretend otherwise. Some projects assume CUDA first. Some instructions are written as if AMD users do not exist. Sometimes support depends on exactly which GPU generation, kernel, library, or build flags are involved.

But for this machine, with this GPU, it has been good enough to do real work.
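
When something does misbehave, the first checks are usually whether the runtime sees the card at all. A sketch, assuming a standard ROCm install:

```bash
# Confirm the GPU is visible to ROCm and note its gfx target
rocminfo | grep -i gfx

# Check VRAM capacity and current usage
rocm-smi --showmeminfo vram
```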

That is an important distinction. I am not trying to make a universal claim that AMD is the right choice for everyone building a local AI box. If someone wants the smoothest possible path and has the budget, NVIDIA is still the safest answer.

I am saying that the AMD path is workable now in a way that matters. It is not just a curiosity. I can run my daily models, build against the stack, and get decent performance. The machine is useful.

For me, that is the threshold.

## Owning the Box Changes the Workflow

The biggest difference is psychological.

When inference is local, I use it differently. I am more willing to paste rough notes into it. I am more willing to let it chew on something unpolished. I am more willing to test odd workflows, run long experiments, and point tools at local files.

There is no per-token bill in the back of my mind. There is no question about which provider is storing what. There is no need to route every experiment through a hosted interface that was built for a general audience.

The machine is not free. The hardware cost is real. Power and heat are real. Time spent maintaining the setup is real.

But once it exists, the marginal cost of curiosity gets much lower.

That matters more than I expected.

## What It Still Does Not Solve

Local hardware does not remove all the hard parts.

The best cloud models are still better at many tasks. Long-context work still runs into memory pressure. Some software stacks are fragile. Model quality varies wildly. Quantization choices matter. A bad prompt still gives a bad answer, just privately.

There is also a maintenance burden that hosted tools hide. If something breaks, I own it. Driver updates, ROCm changes, model compatibility, build failures, disk usage, and thermal behavior all become my problem.

That is the trade.

I am comfortable with it because the machine gives me something I cannot get from a subscription: a place to experiment freely, privately, and repeatedly.

## The Point

The local rig is not about rejecting the cloud completely. It is about changing the default.

Cloud models are still part of my toolbox. But for everyday AI work, especially the parts involving private notes, security research, local code, and experiments that benefit from being close to the machine, I want a capable local path.

This setup gives me that.

It is not perfect. It does not need to be. It is fast enough, private enough, and flexible enough that I actually use it. That is the thing that matters.

@@ -0,0 +1,112 @@
---
layout: post
title: "Experimenting With TurboQuant and MoE Caching"
date: 2026-04-16
description: "Some notes from maintaining a TurboQuant llama.cpp fork and testing whether hot MoE experts on the GPU could make a huge local model practical."
tags: [ai, llama-cpp, local-ai, inference]
---

After getting the local AI rig into a usable place, I started poking at the next obvious problem: how far could I push it?

The model I was interested in was Qwen3.5 397B-A17B. It is the kind of model that makes local inference feel ridiculous in both directions. On one hand, the fact that it can run at all on a machine in my house is impressive. On the other hand, "can run" and "is pleasant to use" are very different things.

That led me into two related experiments in my llama.cpp fork: maintaining a TurboQuant branch for long-context inference, and testing a Mixture-of-Experts cache that tried to keep the hot experts on the GPU while leaving the rest of the model in system RAM.

TurboQuant was the clear success. The MoE cache was the useful negative result.

## TurboQuant Was the Win

The TurboQuant side was not some grand original implementation from scratch. It was mostly integration and maintenance work.

There was an existing TurboQuant llama.cpp fork, and my work was mainly about rebasing that onto a newer llama.cpp release so I could use it with the rest of my setup. That kind of work is less glamorous than writing a new algorithm, but it is a big part of making local AI experiments real.

llama.cpp moves quickly. Backends change, build systems change, kernel code changes, model support changes, and a fork that worked a few months ago can become stale fast. Rebasing an inference fork is not just "resolve a conflict and move on." You have to make sure the pieces still mean the same thing after upstream moved underneath them.

I have already fallen behind again and need to redo that rebase at some point. That is the cost of carrying an experimental branch on top of a fast-moving project.

But the result was absolutely worth it.

TurboQuant attacks one of the most annoying limits in local inference: the KV cache. Long context is useful, but it is not free. Every token leaves memory behind, and at large context sizes that memory becomes a serious part of whether a model is usable at all.

The Google paper behind TurboQuant, [TurboQuant: Online Vector Quantization with Near-optimal Distortion Rate](https://huggingface.co/papers/2504.19874), argues that KV cache quantization can get extremely aggressive while staying effectively quality neutral at practical bit widths. That lines up with what I saw in practice.

With TurboQuant in my llama.cpp fork, KV cache size dropped from roughly 12 GB to about 1.2 GB while still retaining the full model's 262k-token context. In normal use it felt almost entirely lossless.

That is not a small improvement. That changes what kind of long-context work is realistic on local hardware.

I would like to see llama.cpp incorporate this upstream. For now, I maintain the fork because the difference is too useful to give up.
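
For comparison, upstream llama.cpp already exposes a simpler, static form of KV-cache quantization. A hedged sketch, not the TurboQuant path; the model path is a placeholder and exact flag spellings vary between llama.cpp versions:

```bash
# Quantize the K and V caches to q4_0 at a 32k context
# (quantized V cache generally requires flash attention)
llama-cli -m ./models/model.gguf -c 32768 \
  --flash-attn --cache-type-k q4_0 --cache-type-v q4_0
```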

## The MoE Cache Idea Was Different

The more uncertain experiment was the MoE cache.

Mixture-of-Experts models are strange from a systems point of view. The full model can be enormous, but each token only activates part of it. That creates an obvious temptation: if the model is too large to keep entirely on the GPU, maybe you can keep the most-used experts there and leave the rest in CPU memory.

That was the idea I wanted to test with Qwen3.5 397B-A17B.

The rough plan was:

- load the full model into system RAM
- track which experts were getting used
- keep the hot experts resident on the GPU
- fall back to CPU memory for the rest
- see whether the cache hit rate was high enough to beat the simple approach

In theory, that sounds promising. In practice, the machine still has to move data across the system in exactly the wrong places. The cache can help only if the experts it keeps on the GPU are reused often enough to make up for the cost of managing the cache and moving missed experts around.

That was the real question: not whether the idea could be implemented, but whether the hardware balance made it worth doing.

## It Worked, But It Was Slower

The cache worked.

That is worth saying clearly. The experiment was not a failure in the sense of "this could not run." The model loaded. The expert cache did what it was supposed to do. The system could keep hot experts on the GPU and run the rest out of RAM.

But the performance was not good enough.

With the MoE cache enabled, I was seeing around 8 tokens per second. With the model fully loaded in RAM and no MoE cache, I was seeing closer to 10 tokens per second.

That is not the result I wanted, but it is the result that matters.

The simpler approach was faster. Not by an enormous amount, but enough that the extra complexity was hard to justify. If a cache makes the system more complicated and still loses to the baseline, the right answer is not to pretend the cache won. The right answer is to ask why.

## The Bottleneck Was the System

This is where local AI gets interesting to me.

A lot of model discussion focuses on the model itself: parameter count, quantization, context length, benchmark scores. Those things matter, but at this scale the system around the model matters just as much.

The MoE cache was betting that GPU residency for hot experts would beat the cost of pulling everything through CPU memory. On my hardware, that bet did not pay off. The transfer costs, cache management, and actual expert access pattern did not line up well enough.

That does not mean the idea is useless. It means the idea is hardware-sensitive.

On a different machine, the answer could change. More VRAM, multiple GPUs, faster PCIe, different memory bandwidth, a different MoE activation pattern, or a smarter cache policy could move the result. This is exactly why I wanted a local rig in the first place: I can test ideas against real hardware instead of guessing.

## What I Changed in the Fork

My fork ended up with a stack of experimental pieces around this idea:

- TurboQuant rebased onto a newer llama.cpp base
- an MoE expert activation profiler
- cache configuration exposed through normal runtime flags
- hot-expert seeding from profiler output
- fixes for cache correctness issues I ran into while testing
- hysteresis so experts had to show up more than once before being promoted

Some of that was infrastructure more than optimization. Profiling, configuration, and correctness fixes are not the exciting part of an experiment, but they are what make the result believable.

Without them, it is too easy to fool yourself. Maybe the cache is faster. Maybe the workload changed. Maybe the model is silently wrong. Maybe the one prompt you tested happened to hit the right experts. The boring pieces are how you reduce that uncertainty.

## The Useful MoE Result

The useful MoE result was not "I made a 397B model fast on one consumer GPU."

I did not.

The useful result from that side of the experiment was learning where the limits were. Qwen3.5 397B-A17B could run locally on my machine. The MoE cache idea could be implemented. But on this hardware, with this setup, the cache was slower than leaving the model in RAM.

That is still progress. A negative result with numbers is better than a vague assumption. Now I know more about where the bottleneck is, what kind of hardware might change the answer, and which parts of the software stack are worth revisiting later.

I also have a fork that is easier to experiment with next time, even if it has already started to fall behind upstream again.

That is the shape of a lot of local AI work right now. The field moves quickly, the tools are uneven, and not every idea survives contact with the machine. But when the hardware is yours and the stack is inspectable, even the failed experiments leave something useful behind.
Binary file not shown. (Before: 42 KiB)

BIN assets/headshot.jpeg Normal file
Binary file not shown. (After: 352 KiB)

BIN assets/pfp.gif
Binary file not shown. (Before: 9 MiB)

Binary file not shown. (Before: 2.5 MiB)
5 blog.html Normal file

@@ -0,0 +1,5 @@
---
layout: blog
title: Blog
permalink: /blog
---
600 css/styles.css

@@ -1,89 +1,601 @@
/* =============================================
   DESIGN TOKENS — DARK (default / :root fallback)
   ============================================= */
[data-theme="dark"],
:root {
  --color-bg: #171e26;
  --color-surface: #10161d;
  --color-surface-alt: #1e2731;
  --color-border: #243447;
  --color-text-primary: #E8EDF2;
  --color-text-secondary: #8FA3B1;
  --color-text-muted: #566B7A;
  --color-accent: #4EAECF;
  --color-accent-hover: #3B9AB8;
  --color-quote-bar: #4A9B2A;
  --color-link: #E8EDF2;
  --color-link-hover: #4EAECF;
  --color-toggle-bg: #243447;
  --color-toggle-fg: #8FA3B1;
  --color-toggle-hover-bg: #2E4259;
  --color-toggle-hover-fg: #E8EDF2;
}

/* =============================================
   DESIGN TOKENS — LIGHT
   ============================================= */
[data-theme="light"] {
  --color-bg: #F5F7FA;
  --color-surface: #FFFFFF;
  --color-surface-alt: #EEF1F5;
  --color-border: #D0D9E2;
  --color-text-primary: #1A2733;
  --color-text-secondary: #4A6070;
  --color-text-muted: #8FA3B1;
  --color-accent: #1A7FA0;
  --color-accent-hover: #155E78;
  --color-quote-bar: #3A8520;
  --color-link: #1A2733;
  --color-link-hover: #1A7FA0;
  --color-toggle-bg: #E0E8F0;
  --color-toggle-fg: #4A6070;
  --color-toggle-hover-bg: #C8D8E8;
  --color-toggle-hover-fg: #1A2733;
}

/* =============================================
   LAYOUT & TYPOGRAPHY VARIABLES
   ============================================= */
:root {
  --font-sans: 'Inter', system-ui, -apple-system, sans-serif;
  --font-mono: 'Fira Code', 'Cascadia Code', monospace;

  --text-xs: 0.75rem;
  --text-sm: 0.875rem;
  --text-base: 1rem;
  --text-lg: 1.125rem;
  --text-xl: 1.25rem;
  --text-2xl: 1.5rem;
  --text-3xl: 1.875rem;

  --weight-normal: 400;
  --weight-medium: 500;
  --weight-semibold: 600;

  --leading-tight: 1.25;
  --leading-normal: 1.6;
  --leading-relaxed: 1.75;

  --space-1: 0.25rem;
  --space-2: 0.5rem;
  --space-3: 0.75rem;
  --space-4: 1rem;
  --space-5: 1.25rem;
  --space-6: 1.5rem;
  --space-8: 2rem;
  --space-10: 2.5rem;
  --space-12: 3rem;

  --max-width: 740px;
  --radius-sm: 4px;
  --radius-md: 8px;
  --radius-lg: 12px;
  --radius-full: 9999px;

  --transition-fast: 150ms ease;
  --transition-base: 200ms ease;
}

/* =============================================
   RESET + BASE
   ============================================= */
*, *::before, *::after {
  box-sizing: border-box;
}

html {
  display: flex;
  justify-content: center;
  margin: auto;
  max-width: 100%;
  color: white;
  background-color: #1B262C;
  background-color: var(--color-bg);
  color: var(--color-text-primary);
  padding: 0;
  border: 0;
}

body {
  padding: 10px 10px;
  margin: 20px auto;
  font-size: 1.0rem;
  max-width: 800px;
  font-family: sans-serif;
  padding: var(--space-4) var(--space-4);
  margin: var(--space-8) auto;
  font-size: var(--text-base);
  font-family: var(--font-sans);
  font-weight: var(--weight-normal);
  line-height: var(--leading-normal);
  max-width: var(--max-width);
  width: 100%;
}

/* =============================================
   HEADINGS
   ============================================= */
h1, h2, h3, h4, h5, h6 {
  font-family: var(--font-sans);
  font-weight: var(--weight-semibold);
  line-height: var(--leading-tight);
  color: var(--color-text-primary);
}

h1 {
  font-size: var(--text-2xl);
  margin: 0;
  border: 0;
  padding: 0;
  border: 0;
  text-align: center;
  letter-spacing: -0.015em;
}

h2 {
  font-size: 95%;
  font-size: var(--text-xl);
  border: 0;
  padding: 0;
  margin-top: var(--space-8);
  margin-bottom: var(--space-4);
}

ul {
  display: flex;
  flex-direction: row;
  justify-content: center;
  padding-right: 3rem;
}

li {
  display: flex;
  justify-content: center;
  font-size: 25px;
  border: 0;
  margin: 0;
  padding: 0.5rem;
}

#custom-substack-embed {
  display: flex;
  justify-content: center;
}

.shill {
  display: flex;
  justify-content: center;
h3 {
  font-size: var(--text-lg);
  margin-top: var(--space-6);
  margin-bottom: var(--space-3);
}

/* =============================================
   LINKS
   ============================================= */
a, a:active {
  color: inherit;
  color: var(--color-accent);
  text-decoration: none;
  transition: color var(--transition-fast);
}

a:hover {
  color: gray;
  color: var(--color-accent-hover);
}

/* =============================================
   HEADER
   ============================================= */
header {
  text-align: center;
  padding-bottom: var(--space-8);
  border-bottom: 1px solid var(--color-border);
  margin-bottom: var(--space-8);
  position: relative;
}

.pfp {
  display: block;
  margin-left: auto;
  margin-right: auto;
  border-radius: 50%;
  width: 180px;
  height: 180px;
  border-radius: var(--radius-full);
  width: 120px;
  height: 120px;
  margin-bottom: var(--space-4);
  border: 2px solid var(--color-border);
}

.header-tagline {
  font-size: var(--text-sm);
  color: var(--color-text-secondary);
  font-weight: var(--weight-medium);
  letter-spacing: 0.08em;
  text-transform: uppercase;
  margin: var(--space-2) 0 var(--space-6);
}

header ul {
  display: flex;
  flex-direction: row;
  justify-content: center;
  align-items: center;
  gap: var(--space-1);
  list-style: none;
  padding: 0;
  margin: 0;
}

header li {
  display: flex;
  justify-content: center;
  align-items: center;
  font-size: 18px;
  border: 0;
  margin: 0;
  padding: 0;
}

header li a {
  display: flex;
  flex-direction: column;
  align-items: center;
  justify-content: center;
  padding: var(--space-2) var(--space-3);
  border-radius: var(--radius-md);
  font-size: var(--text-xs);
  font-weight: var(--weight-medium);
  color: var(--color-text-secondary);
  transition: color var(--transition-fast), background-color var(--transition-fast);
  gap: var(--space-1);
}

header li a i {
  font-size: var(--text-lg);
}

header li a:hover {
  color: var(--color-accent);
  background-color: var(--color-surface);
}

/* =============================================
   HOME BUTTON
   ============================================= */
.home-btn {
  position: absolute;
  top: 0;
  left: 0;
  display: flex;
  align-items: center;
  justify-content: center;
  width: 36px;
  height: 36px;
  border-radius: var(--radius-md);
  border: 1px solid var(--color-border);
  background-color: var(--color-toggle-bg);
  color: var(--color-toggle-fg);
  font-size: 14px;
  transition: background-color var(--transition-fast), color var(--transition-fast), border-color var(--transition-fast);
}

.home-btn:hover {
  background-color: var(--color-toggle-hover-bg);
  color: var(--color-toggle-hover-fg);
  border-color: var(--color-accent);
}

/* =============================================
   THEME TOGGLE
   ============================================= */
.theme-toggle {
  position: absolute;
  top: 0;
  right: 0;
  display: flex;
  align-items: center;
  justify-content: center;
  width: 36px;
  height: 36px;
  border-radius: var(--radius-md);
  border: 1px solid var(--color-border);
  background-color: var(--color-toggle-bg);
  color: var(--color-toggle-fg);
  cursor: pointer;
  font-size: 14px;
  -webkit-appearance: none;
  appearance: none;
  line-height: 1;
  transition: background-color var(--transition-fast), color var(--transition-fast), border-color var(--transition-fast);
}

.theme-toggle:hover {
  background-color: var(--color-toggle-hover-bg);
  color: var(--color-toggle-hover-fg);
  border-color: var(--color-accent);
}

.theme-toggle .icon-moon { display: inline-block; }
.theme-toggle .icon-sun { display: none; }
[data-theme="light"] .theme-toggle .icon-moon { display: none; }
[data-theme="light"] .theme-toggle .icon-sun { display: inline-block; }

/* =============================================
   THEME TRANSITION (only fires on user click)
   ============================================= */
.theme-transitions-enabled,
.theme-transitions-enabled * {
  transition-property: color, background-color, border-color;
  transition-duration: var(--transition-base);
  transition-timing-function: ease;
}

/* =============================================
   HOMEPAGE SECTIONS
   ============================================= */
.section-bio {
  margin-bottom: var(--space-10);
}

.section-bio p {
  line-height: var(--leading-relaxed);
  color: var(--color-text-primary);
  margin-bottom: var(--space-4);
}

.section-heading {
  font-size: var(--text-xs);
  font-weight: var(--weight-semibold);
  letter-spacing: 0.12em;
  text-transform: uppercase;
  color: var(--color-text-muted);
  margin-top: 0;
  margin-bottom: var(--space-4);
  padding-bottom: var(--space-2);
  border-bottom: 1px solid var(--color-border);
}

.section-skills {
  margin-bottom: var(--space-10);
}

.skills-grid {
  display: flex;
  flex-wrap: wrap;
  gap: var(--space-2);
}

.skill-tag {
  display: inline-flex;
  align-items: center;
  padding: var(--space-1) var(--space-3);
  font-size: var(--text-sm);
  font-weight: var(--weight-medium);
  color: var(--color-text-secondary);
  background-color: var(--color-surface);
  border: 1px solid var(--color-border);
  border-radius: var(--radius-full);
  line-height: 1.5;
  transition: color var(--transition-fast), border-color var(--transition-fast), background-color var(--transition-fast);
}

.skill-tag:hover {
  color: var(--color-accent);
  border-color: var(--color-accent);
  background-color: var(--color-surface-alt);
}

.section-recent-posts {
  margin-bottom: var(--space-10);
}

.view-all-posts {
  margin-top: var(--space-4);
  font-size: var(--text-sm);
}

.view-all-posts a {
  color: var(--color-accent);
}

.view-all-posts a:hover {
  color: var(--color-accent-hover);
}

/* =============================================
   BLOG INDEX
   ============================================= */
.blog-list {
  margin-top: var(--space-4);
}

.post-preview {
  padding: var(--space-5) 0;
  border-bottom: 1px solid var(--color-border);
}


.post-date {
  font-size: var(--text-xs);
  font-weight: var(--weight-medium);
  letter-spacing: 0.06em;
  color: var(--color-text-muted);
  text-transform: uppercase;
  display: block;
  margin-bottom: var(--space-1);
}

.post-preview a {
  font-size: var(--text-base);
  font-weight: var(--weight-medium);
  color: var(--color-text-primary);
  display: block;
  margin-bottom: var(--space-1);
}

.post-preview a:hover {
  color: var(--color-accent);
}

.post-description {
  font-size: var(--text-sm);
  color: var(--color-text-secondary);
  margin-top: 0;
  line-height: var(--leading-normal);
}

/* =============================================
   BLOG POST
   ============================================= */
.post-header {
  margin-bottom: var(--space-8);
  padding-bottom: var(--space-6);
  border-bottom: 1px solid var(--color-border);
}

.post-header h1 {
  text-align: left;
  font-size: var(--text-2xl);
  letter-spacing: -0.02em;
  margin-bottom: var(--space-2);
}

.post-content {
  line-height: var(--leading-relaxed);
}

.post-content img {
  max-width: 100%;
  height: auto;
  border-radius: var(--radius-md);
  border: 1px solid var(--color-border);
  margin: var(--space-6) 0;
  display: block;
}

.post-content h2,
.post-content h3 {
  margin-top: var(--space-8);
  margin-bottom: var(--space-3);
}

.post-content p {
  margin-bottom: var(--space-5);
}

.post-content code {
  background-color: var(--color-surface-alt);
  color: var(--color-accent);
  padding: 0.1em 0.35em;
  border-radius: var(--radius-sm);
  font-family: var(--font-mono);
  font-size: 0.875em;
}

.post-content pre {
  background-color: var(--color-surface-alt);
  border: 1px solid var(--color-border);
  padding: var(--space-5);
  border-radius: var(--radius-md);
  overflow-x: auto;
  margin-bottom: var(--space-6);
}

.post-content pre code {
  background: none;
  color: var(--color-text-primary);
  padding: 0;
  font-size: var(--text-sm);
}

.post-content blockquote {
  border-left: 3px solid var(--color-quote-bar);
  margin-left: 0;
  margin-right: 0;
  padding: var(--space-3) var(--space-5);
  background-color: var(--color-surface);
  border-radius: 0 var(--radius-sm) var(--radius-sm) 0;
  color: var(--color-text-secondary);
  margin-bottom: var(--space-5);
}

.post-nav {
  margin-top: var(--space-10);
  padding-top: var(--space-5);
  border-top: 1px solid var(--color-border);
  font-size: var(--text-sm);
}

.post-nav a {
  color: var(--color-text-secondary);
}

.post-nav a:hover {
  color: var(--color-accent);
}

/* =============================================
   PGP KEY PAGE
   ============================================= */
.gpg {
  display: inline-block;
  background-color: #222831;
  border-radius: 15px;
  display: block;
  background-color: var(--color-surface-alt);
  border: 1px solid var(--color-border);
  border-radius: var(--radius-md);
  max-width: 100%;
  word-wrap: break-word;
  word-break: break-all;
  padding: var(--space-5);
  font-family: var(--font-mono);
  font-size: var(--text-xs);
  line-height: var(--leading-relaxed);
  color: var(--color-text-secondary);
  overflow-x: auto;
  margin-top: var(--space-4);
}

.download {
  webkit-user-select: none;
  ms-user-select: none;
  user-select: none;
  display: inline-flex;
  align-items: center;
  gap: var(--space-2);
  padding: var(--space-2) var(--space-4);
  background-color: var(--color-surface);
  border: 1px solid var(--color-border);
  border-radius: var(--radius-md);
  font-size: var(--text-sm);
  font-weight: var(--weight-medium);
  color: var(--color-text-secondary);
  transition: color var(--transition-fast), border-color var(--transition-fast);
  -webkit-user-select: none;
  -ms-user-select: none;
  user-select: none;
}

.download:hover {
  color: var(--color-accent);
  border-color: var(--color-accent);
}

/* =============================================
   FOOTER
   ============================================= */
footer {
  margin-top: var(--space-8);
  display: flex;
  justify-content: space-between;
  align-items: center;
  font-size: var(--text-base);
  color: var(--color-text-muted);
}

footer a {
  color: var(--color-text-muted);
}

footer a:hover {
  color: var(--color-accent);
}

/* =============================================
   RESPONSIVE
   ============================================= */
@media (max-width: 600px) {
  body {
    padding: var(--space-4) var(--space-3);
    margin-top: var(--space-5);
  }

  h1 {
    font-size: var(--text-xl);
  }

  .pfp {
    width: 96px;
    height: 96px;
  }

  .theme-toggle,
  .home-btn {
    width: 32px;
    height: 32px;
    font-size: 12px;
  }

}
22 feed.xml Normal file

@@ -0,0 +1,22 @@
---
layout: null
---
<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
  <channel>
    <title>{{ site.title }}</title>
    <description>{{ site.description | xml_escape }}</description>
    <link>{{ site.url }}</link>
    <atom:link href="{{ site.url }}/feed.xml" rel="self" type="application/rss+xml"/>
    <lastBuildDate>{{ site.time | date_to_rfc822 }}</lastBuildDate>
    {% for post in site.posts limit:20 %}
    <item>
      <title>{{ post.title | xml_escape }}</title>
      <description>{{ post.content | xml_escape }}</description>
      <pubDate>{{ post.date | date_to_rfc822 }}</pubDate>
      <link>{{ post.url | absolute_url }}</link>
      <guid isPermaLink="true">{{ post.url | absolute_url }}</guid>
    </item>
    {% endfor %}
  </channel>
</rss>
77 index.html

@@ -1,77 +0,0 @@
<!DOCTYPE html>
<html lang="en">

<head>
  <meta charset="UTF-8">
  <meta http-equiv="X-UA-Compatible" content="IE=edge">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">

  <link rel="stylesheet" href="css/styles.css">

  <script src="https://kit.fontawesome.com/f26d369dc4.js" crossorigin="anonymous"></script>
  <script src="https://substackapi.com/widget.js" async></script>

  <title>Developer / Technologist</title>
</head>

<header>
  <img src="assets/pfp.gif" alt="avatar" class="pfp">

  <h1>Bryan Ramos</h1>

  <ul>
    <li><a href="mailto:bryan@ramos.codes" class="fa-solid fa-envelope"></a></li>
    <li><a href="https://x.com/itme_brain" class="fa-brands fa-x-twitter"></a></li>
    <li><a href="https://github.com/itme-brain" class="fa-brands fa-github"></a></li>
    <li><a href="pgpkey.html" class="fa-sharp fa-solid fa-key"></a></li>
    <li><a href="https://iris.to/#/profile/npub17374whevgs040xkd48gr99g0xmpxd9snqt57dsfvtp0jcjt8yjeq49rdyt"
      class="fa-sharp fa-solid fa-feather"></a></li>
    <li><a href="https://itmebrain.substack.com" class="fa-solid fa-blog"></a></li>
  </ul>
</header>

<body>
  <h2>
    Check out my work on Github.</br></br>

    Contact with inquiries using email or visit my socials using the above links.</br></br>

    Please encrypt any sensitive information using the provided PGP key.</br></br>
  </h2>

  <div class="causes">
    <h1>Projects Supported</h1></br>
    <a href="https://www.linuxfoundation.org/about/donate">Linux Foundation</a></br>
    <a href="https://www.fsf.org/about/ways-to-donate/">Free Software Foundation</a></br>
    <a href="https://bitcoin.org/en/bitcoin-core/contribute/">Bitcoin Core</a></br>
    <a href="https://zeusln.app/about">Zeus App</a></br>
    <a href="https://github.com/ElementsProject/lightning">Core Lightning</a><br>
    <a href="https://github.com/nostr-protocol/nostr">Nostr</a></br>
    <a href="https://nixos.org/donate.html">NixOS Foundation</a></br>
    <a href="https://donate.torproject.org/">Tor Project</a></br>
  </div></br></br>

  <footer>
    <p class="shill">
      Stay in touch and keep up-to-date with any articles I may publish
    </p>
    <div id="custom-substack-embed">
      <script>
        window.CustomSubstackWidget = {
          substackUrl: "itmebrain.substack.com",
          placeholder: "example@gmail.com",
          buttonText: "Confirm",
          theme: "custom",
          colors: {
            primary: "#54982D",
            input: "#1B262C",
            email: "#A69F9F",
            text: "#FFFFFF",
          }
        };
      </script>
    </div>
  </footer>
</body>

</html>
47 index.md Normal file

@@ -0,0 +1,47 @@
---
layout: page
title: Software & Systems Engineer
---

<section class="section-bio" markdown="1">

I work with systems programming, real-time computing, and simulation engineering. My background spans Linux, low-level development in C and Python, and debugging latency-critical systems where determinism and microseconds count. Currently exploring applied AI and LLM integration.

Reach me at [bryan@ramos.codes](mailto:bryan@ramos.codes)

</section>

<section class="section-skills">
  <h2 class="section-heading">Stack & Tools</h2>
  <div class="skills-grid">
    <span class="skill-tag">C</span>
    <span class="skill-tag">Python</span>
    <span class="skill-tag">Bash</span>
    <span class="skill-tag">Linux</span>
    <span class="skill-tag">Real-Time Systems</span>
    <span class="skill-tag">Simulation Engineering</span>
    <span class="skill-tag">AI/LLM Integration</span>
    <span class="skill-tag">Automation</span>
    <span class="skill-tag">Networking</span>
    <span class="skill-tag">Docker</span>
    <span class="skill-tag">KVM/QEMU</span>
    <span class="skill-tag">Nix</span>
    <span class="skill-tag">SQL</span>
  </div>
</section>

<section class="section-recent-posts">
  <h2 class="section-heading">Recent Posts</h2>
  <div class="blog-list recent-posts">
    {% for post in site.posts limit:3 %}
    <div class="post-preview">
      <span class="post-date">{{ post.date | date: "%B %-d, %Y" }}</span>
      <a href="{{ post.url | relative_url }}">{{ post.title }}</a>
      {% if post.description %}
      <p class="post-description">{{ post.description }}</p>
      {% endif %}
    </div>
    {% endfor %}
  </div>
  <p class="view-all-posts"><a href="{{ '/blog' | relative_url }}">View all posts →</a></p>
</section>
128 pgpkey.html

@@ -1,128 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta http-equiv="X-UA-Compatible" content="IE=edge">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <link rel="stylesheet" href="css/styles.css">

  <title>Bryan Ramos - PGP Key</title>
</head>
<body>

  <a href="assets/public.key" class="download">🔑Key File
  <br/><br/></a>

  <div class="gpg">
    -----BEGIN PGP PUBLIC KEY BLOCK-----<br/><br/>

    mQINBGP0BgMBEAC2v+n9plI0p+TqIrmvz7JHoYbtUK3NDkyNeIsgS+sE5nfLB1Ef<br/>
    vQCR0HdysgpmAUywqEx+YK7Nkr4szoK8nDLpgpSfaDZNss+ePu6eYVTVktelBn2Q<br/>
    5f5MKDILY9mkmDPgzvpDDhkFXGK3cpeUX+X5vY1W76yuRgm6zBDIux+yf027nw3U<br/>
    phesn/WlWXRsmAXG2helt1nB6Foj6LjgwRG/aKMI8cQq0JS13cfUZO1nq2ifM0pm<br/>
    4HqWgbZOKYWHsoOw4qNiuxWwVoL5E7UQW2MEyxZmLZaNohEpReYpI0N9FGB/AZYt<br/>
    iKn9SO9SmG+afE+gxrExJYZSGVHHKlPc79dcIBVvYEA8dV/OJBjHc83EhUQVU4vQ<br/>
    x1y386HYctlHVWZ64tc1XROlQe++NxfgQZz4jnvGzHMakr8/IZAV3LP3PGVAa7kx<br/>
    iVrTE+WodK/kELm1PMLWlWmXT3GiumOngm4y1dWtUirqxni/Nl7BA4eHM3Q3OZiR<br/>
    eEb80FkbXCoaP5REU1EdVlAW/ZGP+mTwiqekT5ThocaD/BgYSy9UlGf5YyOEnqOt<br/>
    G+0JfS3mG0PysFjF0B5dMyBquikD4zVBo3+a7ppbrAage3EFhHiX0Les0q566I8p<br/>
    0hlXS7nz0I4xAxxRLfydwJptndjZgeiq9o1XMRA0JUZQhzuk2VYQ6MSVhwARAQAB<br/>
    tB9CcnlhbiBSYW1vcyA8YnJ5YW5AcmFtb3MuY29kZXM+iQJOBBMBCgA4FiEE8fNG<br/>
    ZFhFKy3zUfHoZNErqVrOHy0FAmP0BgMCGwEFCwkIBwIGFQoJCAsCBBYCAwECHgEC<br/>
    F4AACgkQZNErqVrOHy25SBAArl6JHrDm3fLXPhwtHf9WzxQvW6BmMgLQQ+bGGGba<br/>
    A3e+eKb0ibSmXH9M22GOSxKqk2BePtoLFdyDKDFNwYDwzj0ioQ80Q9YR6aoSuwOf<br/>
    HwXeiYsgK76IbsRciXSv6JgAsXO9UOGTlHlTgFsE3AMjnCgPrHbV3SZdkFt71XMo<br/>
    fbRmYwC33HK6QNUXeq4O+gGO5vJI8Wx1mtmy6kq/3srzMpCGybg9M8C5AQoazo/u<br/>
    WOjO57QkUdbAXO8HbHInexsstJJn+0o/FLfMoOy7v/cpzTLbbpONRzQbEq1/Utt1<br/>
    TaIc1FTWT1b4oWnIGv2stlCGzx9IgsseJocSBG+kGgkKwVBWIcCwq+cCdfkOReCk<br/>
    VHTg1oRH8t078346KuxEaA7ofKaByirQosZUeF5WTyMuJUDf1mNxxZngRKjIHD3c<br/>
    lmK8REnYjQ4b+RfznfV8qc8tH624EUTNlT123ufUIvba0fR8OryhdxPOOgdLjlNL<br/>
    XdkfG5oENnBy3EzGn7xgR6sCRtlFSEcfKQFcec1fjqYMHxPAExajmSHLwr5107LT<br/>
    4B+F5eOt9CBFKW/cxnVwG/3oW0mzLa231V0eYquiYkbYHVswLdhr02vyHpLXXVZk<br/>
    JgiLSXIJ6yKwLA9W8HgHgDYCp899Jl+wqhFLxr7oUjXcLhuZO9Q3P3req0SJRfUu<br/>
    GTO5Ag0EY/QGQAEQANsJBUpkk0ZW5swgzC/c7pxv4VGS8VZcr3Isol8NHAUUwHyo<br/>
    jqAYNtqW8PQLgQ34uuuC5GCS2hxN57WdgmSkv/to8THl6IbE1V/YVaaGXX9yiJmH<br/>
    72//kc9g2prXyrtObwVhgKiYQxPPegm9ubLkb1khCTLhozCJDM1wbQxmE5I2cICC<br/>
    5lwCi1NDsAyvUtWANzb0EXPZh2iPv8sWMh3RStAGSsboHzHYdR9RZGRjKG/ET5zv<br/>
    OBbFpRLFjvMJUL22M0V5FFPbuz+4Aut21wkYdueHtREpUgAcba68Doz75jQb0PEZ<br/>
    52hjLKuXVf0/1sEPXUs/sL8kyl6QzIqFIXsrjbw6BrGSdhn6YoY95koCb6AXUrFC<br/>
    oOXQC5BecTcP7V3GOWDEaDUbjN8mc2t1ujs7KYIqi0UCiHa9m5L2Q/9TyOSLyjSf<br/>
    0VKHzib7Ov76GvphbYoQSXWX8R6ogcexQH6aQlXI31ir/HsHkatImYomySZiwNVV<br/>
    5PQD/7lbWGjLB6LB9PsyVIVl3uq+sSX7xKeogZkEuTcerKVJjpknisKh6aR/uJRV<br/>
    KJs2U3MolyVanDb/y6VBJrCOu8ZiCZuDtCntUg8MxeLNFO0MVdgAPiHMtJd8YrzK<br/>
    bhbkHBufAgOLMbGTYq47bQNuRz/CjIz0xll0tLeS9LD1hcSWX/nMhFgfxDjxABEB<br/>
    AAGJBGwEGAEKACAWIQTx80ZkWEUrLfNR8ehk0SupWs4fLQUCY/QGQAIbAgJACRBk<br/>
    0SupWs4fLcF0IAQZAQoAHRYhBDgB5+1vnI0s1XHgHmq9zRRNZkPIBQJj9AZAAAoJ<br/>
    EGq9zRRNZkPIMbUQAJaDnJHMMXTNmANva65XjY2eJpoYBCIvd8FodRfFCbAPkNad<br/>
    MtsCgd2dXZPizTOUNqcOujACd7u3P/VazYT0cUgjx6mpWdvxYuGMCM71WLHKeCaq<br/>
    bXzzKrNaREMDTsMBn0wrIr5ZEuRsLOi4ZVZ5vFvtMQYnzjNT6gON+fHpaD6sShnR<br/>
    VWXWaYtQ2ttN2+6gwmKCaqiH2suA+QkI/gPjqdMOeXvu6sMUd5IjaCBJy3Ddyjif<br/>
    /ZYkJUjDkxG7aC4B2XtGUf0lPG+kiCHGjgTsvIeYYSpi/TyevTF8QNfZWcp/NBcf<br/>
    ZXhCoUoA62zzQ2SXpydZpryKn8klAYQLLA8mq6v/ljqcwFyLYtx0Cw49Thspo/4r<br/>
    ba1jzsv5QdBveIKdGjzcuexTaIEFB6rQXIFuVVfn074tpZIO+KmHO/z62i73bbko<br/>
    67tm+VDvbgsGUd4536lSKMekbdn0+5ODl76AJCD0M+Vzxkl9X/fg4zgz0vG2Ppiq<br/>
    08LqBPidA9EQ+tEHm7OIXk9Z+wApDCb27zwsiygkV9uWXuEaNYjCjUZTEw9CYTuH<br/>
    CdCPOdeJYBzKpfGXldJo6F6NbLLXywL4ej2Lt99tqFF2tQ3I6SKyYx+I2veYsjKs<br/>
    7g29bF4WuU1IVi4Kn144NUzEHOJZKeyYOwEz5+chq9KuYBY8b1OHe1Q5pEFIbVIP<br/>
    /1pdwhs6zV8tJZOgzLb9q+yLuXH1Fk4YE9wZDh/rK3hpD+KGyNRa+0J70wdYDOqk<br/>
    4C9ybAaljvJPXO622Ai/RlFLQVK4KdJ2Ig9mwtIhwBvjnKkCmG502HGRUa3HVpDK<br/>
    pb9WDrH9eJPxkRew1y7Kl6ua10mNh7vMIbEDzZY36Eovzc127ANy/EQR8OwnI8Vg<br/>
    39rCq1wDVeULHmF4j63cm3pHo6LK1OGZjAkg9XjT/aDpuqigcdEmFjmx7RSBPZFC<br/>
    RZTJ6kcafbnxQfKx7soI7+1AWVSrTt+/XePZPubnFeMlfXtGVXejTG2rCWJqRpGZ<br/>
    sjwgGiOtcnzvF37TQ4XrWV5T45XeSmG4hsF+zShXqevGulOwGNPtJbmiINTaeKun<br/>
    1KxjSVpwkniOQgrWNSFCD2RzSEuQRKSg0XMbgPLbmplVO4WAzhQ/Ry4DpNqjJwkp<br/>
    2z5WQ8XhfsxecNBc10pbPGyDUbXk96bZSXc31s5tKIyUaCxMmUu87Z0q9KEaVrGc<br/>
    Tp69o4LIX8dhEqAx8Mk1AKpk8TsT0Ebc75X+xbzVoiimblUuB/+OrDsK7R0hihIe<br/>
    TU+1xOJ1gyppkuacOuHioV4k9k4NUwgk+YrSKTrhFEzbM6gcOngTB0VTFzQlEjxB<br/>
    wxl2qN7f0lFD6F0rLJ0Rm06xIwTNIe/0MfMXAJBB45DFuQINBGP0BlIBEADAkdgW<br/>
    M8SyGyde5Op/B9yMHNPfuSNRjK4/HHmLez1GTriNwuqor5FRrDCO8VPUbQX/x06O<br/>
    2HZj8fJWa+6hc9+giUTXNbYtlMVpZOUVhGxzuy2Y6YE82maBaJ3EB/KBP7zdgvKT<br/>
    bxmjv5hre9u/LaY6tloCzeaBUWPV9+e5Bxq72qC507V/z6lc+PgxWWfGkmWBuT+v<br/>
    laHWFb6ZM5ldtcMSdscrLBcxLMnjNIRlIaWpj+tvuInMdV3HrTn/bdHCP/Ybrf95<br/>
    DYY+7p+KPGrdXJH121f8qZXRihTJerJOGvGbue6FIJ+wYSEr3nb9bNyym/w+Mk9Z<br/>
    0wJZZVfjbqFNcGhTttZWlzdTJwerwj7cGsTtMcuIphhUdLhQns+dBTVKVrqvvHSu<br/>
    p/w9IpnyDhcgqv8v23xfSCuKooWPn2E1/Pd4enLCHVzmFW1xQDtDunRuxBbHYpM4<br/>
    5gknVdIp8bY23y1fj0mottIfgZZEfiMR6FJxseFcWuG7VdC7VITdgbNl5YDXw4ts<br/>
    xmg2qrRSNUTkFAKNwIekqwziay4DcnWkoikH+n3bHre5wQqFzHIV03Zo8YcgKvyT<br/>
    0hwAvn2wGRoIynInFMi2/314xbAUBq10QhREGOPS3oUvBUZxhTkiBMKVYyKA97JQ<br/>
    c2Xhrkx9cuZxh3y7j3DflRBW9XLJvbcLGDziTwARAQABiQI2BBgBCgAgFiEE8fNG<br/>
    ZFhFKy3zUfHoZNErqVrOHy0FAmP0BlICGwwACgkQZNErqVrOHy0dOxAAlNRb0yBq<br/>
    SLLU/pQHjnqRQsLpXFmokcAVfZcEoODTMmzPf3uKDExkHBsyRjbRrEazMLQZIwIb<br/>
    78AXvPx6W+lwkmrZ1IXfTkURMi2RmSSOcjTJzipM4WKkOy6zSg29chnBz8edq8AF<br/>
    rErYdY5IgGCn3RHtkGjtKRSV0m4cdoO/wqGHtZdxEhmfmAzs+Wwevqb1nzptG3my<br/>
    ZdEJ5rkgGcnvUjkJo815FjR1fuo0KSuVZVelvWMp6JFYMWc4FUh2bYWymIQ6u8/f<br/>
    2v8EnacG/oNHDkZG0edTPU4dClHCtXqejAxazHYUojJkFdWUMoEIJ7VYg23N4WAW<br/>
    0qf78uBOuGBjl8g5sOmu/IQpMsO51NiDSw/lGLfPsKJKTIe7N6Jxs8PT66Jqvw2U<br/>
    4moKEAcoLGxXkIfY7UMFGflaADzBQEebNiekRMw/SAxB3mRptuQ96QuCrpLE7kmI<br/>
    KPs0vk3om0Lz59q3JoYmMEoEIMM3Z1j94mp07nyJzKvOREtQYY7WIKG/sgUHekjm<br/>
    lrUfez8xHCG4G0r4KTiu3rGT/rvCehTxvkl4Gmimeo+XNb7vwcr1O0/DTH3ZCG8o<br/>
    +mwGnah7T6ch60YFSWm0RkxNozNHWJf5Ee6gVv7nEyB1pbuqhXHliv3hhK+/4SWW<br/>
    RMwhK4b5axJn9aHTu3rwDdaDpUkkApY4rhq5Ag0EY/QGZAEQAOXjz3loH0/mn+Wn<br/>
    wermse6fhyW+HJNIcWLdTZ3o44GhbkWb5VxCdb/FuOYIGxeTkF2KjCwHFCHCfN1/<br/>
    P8okvsnlGhuiZQRpVHBv1TBPzx4m94unXgEbyPYndKN/KGsJf7iOQ/HRs9CTUcZy<br/>
    5hj608Rd/Wr+mzzwOG7QIBEEjNhA5NhjpvWpbPGkOgVkYeMobyDmJjoUi7rnIoq+<br/>
    9XLV/wiBneXcinAFZVqbGCRNxhjRBhKubOjWftNfHCtZu96cCoGxDRwE+z6BVre4<br/>
    iv7VMmXQDPlISUFUa7cu9R2WTny2u09SPpNBHdhSSDtWOWXtYc52qG7HllA2GOQ6<br/>
    wd6t/RPDzp7pwTOB5O4htAchvQtyxS6fApy6Hb5q7tE7n31y8efT7FkTkxkHGWgM<br/>
    NoncmyKWIzyTI8/9TcRGPTdxYtbsGptP6x+MA6XbVELOTSJDGTXC3/xWa0Kv0B2/<br/>
    sjKu1pi9/9vBE/6D72V2bMoa3wx1vrTm5XNnvQf8subXt/jRN75Adp7HlvL/qnpy<br/>
    7AQRm2AiDndamCW7SsDpTGsF9AQcqX8m3cUt4TSacTJiSRHYycc23JZEhe26phkw<br/>
    CbZRvWkUcfuNBXWAaINVPDprZ4jArbVr+Fe1GMVSkV3WcHWf4o18kETjNPfCbdR3<br/>
    uYrD/qtaehHKFhm8ZeQV2n6ISzj1ABEBAAGJAjYEGAEKACAWIQTx80ZkWEUrLfNR<br/>
    8ehk0SupWs4fLQUCY/QGZAIbIAAKCRBk0SupWs4fLcubD/oDGub4+uep50VBUa0u<br/>
    BZAUu/oS664+53sZyvogMzeIT32DT3vDaa3W2aqUNX/dZVzOcsV07HO4yk6+kiSk<br/>
    1Db2FbRFODbFcs5mBYo/EFSxExhQMQFqgXaW3FrpvL5ljAwsjdoSN93DnMkLnC9K<br/>
    XZUyUT+RDcJnk0xS+0ex77nc8vp13n2huHuXU6BbEGofrT9br7Kyezh84GV9nxls<br/>
    C0PwTX0gBaesqeY/9rtAXq+p+kYBafbny/3zrL8CBwqHqRZWiNbkyGWx9WHvizZE<br/>
    0VJJzGl0CTP7aE/N42t+LDGuaA76SJXkkqGs7GmJ3EHVA8N/2Lwhf0saaG3cBrKx<br/>
    lXrJoSY7TxeoJ7rdt/KRJfKsU0bdXgVXDFrlf4ZvctCLZmQ0nno2cgYemTnELRYv<br/>
    FzrS2itqqWP1ev2iPpCbKp099i/w6D13C3jBVAVYPBapD6aaD7YHWLhHIA5zH7bF<br/>
    n8IgacgKBoJ8u3jo3eeT5CXfsrnwOYdrqposfMCUOriJHx41nGUqjNZDG2ByHxgS<br/>
    mnUd3lrjRDWTUzXj8pRN2K7Uqbbs2Mz4Q64MgbCkkTichMlVux8kH+O/I/veAYto<br/>
    OEpwdDwa67AtzYKG0ssOJI+po9TlbKYS4O4H8XnPhYSOEw8eObNPYCX7jyAjXloo<br/>
    1hbflYLyMYo1BxGR6bPS9gJA2w==<br/>
    =5uun<br/>
    -----END PGP PUBLIC KEY BLOCK-----
  </div>
</body>
</html>
23 sitemap.xml Normal file

@@ -0,0 +1,23 @@
---
layout: null
---
<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
  <url>
    <loc>{{ site.url }}/</loc>
    <lastmod>{{ site.time | date: '%Y-%m-%d' }}</lastmod>
    <priority>1.0</priority>
  </url>
  <url>
    <loc>{{ site.url }}/blog</loc>
    <lastmod>{{ site.time | date: '%Y-%m-%d' }}</lastmod>
    <priority>0.8</priority>
  </url>
  {% for post in site.posts %}
  <url>
    <loc>{{ post.url | absolute_url }}</loc>
    <lastmod>{{ post.date | date: '%Y-%m-%d' }}</lastmod>
    <priority>0.6</priority>
  </url>
  {% endfor %}
</urlset>