Move posts from Wordpress to physical markdown

Lewis Dale 2023-12-26 14:35:09 +00:00
parent e828f9f590
commit c8c5981bd3
147 changed files with 6219 additions and 98 deletions


@@ -0,0 +1,6 @@
module.exports = function (eleventyConfig) {
eleventyConfig.addFilter('excerpt', (content, limit = 250) => {
const excerpt = content.replace(/(<([^>]+)>)/gi, "").slice(0, limit) + " […]";
return excerpt;
});
}


@@ -1,9 +1,12 @@
 const dateFilters = require('./dates');
 const arrayFilters = require('./arrays');
+const excerptFilter = require('./excerpt');
 module.exports = function(eleventyConfig) {
 eleventyConfig.addPlugin(dateFilters);
 eleventyConfig.addPlugin(arrayFilters);
+eleventyConfig.addPlugin(excerptFilter);
 eleventyConfig.addFilter('keys', obj => Object.keys(obj))
+eleventyConfig.addFilter('json', obj => JSON.stringify(obj, null, 2));
 }

package-lock.json generated

@@ -17,6 +17,7 @@
"@toycode/markdown-it-class": "^1.2.4",
"autoprefixer": "^10.4.13",
"cssnano": "^6.0.1",
"glob": "^10.3.10",
"lodash": "^4.17.21",
"markdown-it-abbr": "^1.0.4",
"markdown-it-anchor": "^8.6.6",
@@ -262,6 +263,95 @@
"resolved": "https://registry.npmjs.org/@iarna/toml/-/toml-2.2.5.tgz",
"integrity": "sha512-trnsAYxU3xnS1gPHPyU961coFyLkh4gAD/0zQ5mymY4yOZ+CYvsPqUbOFSw0aDM4y0tV7tiFxL/1XfXPNC6IPg=="
},
"node_modules/@isaacs/cliui": {
"version": "8.0.2",
"resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz",
"integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==",
"dependencies": {
"string-width": "^5.1.2",
"string-width-cjs": "npm:string-width@^4.2.0",
"strip-ansi": "^7.0.1",
"strip-ansi-cjs": "npm:strip-ansi@^6.0.1",
"wrap-ansi": "^8.1.0",
"wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0"
},
"engines": {
"node": ">=12"
}
},
"node_modules/@isaacs/cliui/node_modules/ansi-regex": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz",
"integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/ansi-regex?sponsor=1"
}
},
"node_modules/@isaacs/cliui/node_modules/ansi-styles": {
"version": "6.2.1",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz",
"integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/@isaacs/cliui/node_modules/emoji-regex": {
"version": "9.2.2",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz",
"integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="
},
"node_modules/@isaacs/cliui/node_modules/string-width": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz",
"integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==",
"dependencies": {
"eastasianwidth": "^0.2.0",
"emoji-regex": "^9.2.2",
"strip-ansi": "^7.0.1"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/@isaacs/cliui/node_modules/strip-ansi": {
"version": "7.1.0",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz",
"integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==",
"dependencies": {
"ansi-regex": "^6.0.1"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/strip-ansi?sponsor=1"
}
},
"node_modules/@isaacs/cliui/node_modules/wrap-ansi": {
"version": "8.1.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz",
"integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==",
"dependencies": {
"ansi-styles": "^6.1.0",
"string-width": "^5.0.1",
"strip-ansi": "^7.0.1"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/wrap-ansi?sponsor=1"
}
},
"node_modules/@nodelib/fs.scandir": { "node_modules/@nodelib/fs.scandir": {
"version": "2.1.5", "version": "2.1.5",
"resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
@ -294,6 +384,15 @@
"node": ">= 8" "node": ">= 8"
} }
}, },
"node_modules/@pkgjs/parseargs": {
"version": "0.11.0",
"resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz",
"integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==",
"optional": true,
"engines": {
"node": ">=14"
}
},
"node_modules/@sindresorhus/slugify": { "node_modules/@sindresorhus/slugify": {
"version": "1.1.2", "version": "1.1.2",
"resolved": "https://registry.npmjs.org/@sindresorhus/slugify/-/slugify-1.1.2.tgz", "resolved": "https://registry.npmjs.org/@sindresorhus/slugify/-/slugify-1.1.2.tgz",
@ -1309,6 +1408,11 @@
"url": "https://github.com/fb55/domutils?sponsor=1" "url": "https://github.com/fb55/domutils?sponsor=1"
} }
}, },
"node_modules/eastasianwidth": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz",
"integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA=="
},
"node_modules/ee-first": { "node_modules/ee-first": {
"version": "1.1.1", "version": "1.1.1",
"resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
@ -1632,6 +1736,25 @@
"node": "^10.12.0 || >=12.0.0" "node": "^10.12.0 || >=12.0.0"
} }
}, },
"node_modules/flat-cache/node_modules/glob": {
"version": "7.2.3",
"resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
"integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
"dependencies": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^3.1.1",
"once": "^1.3.0",
"path-is-absolute": "^1.0.0"
},
"engines": {
"node": "*"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/flat-cache/node_modules/rimraf": { "node_modules/flat-cache/node_modules/rimraf": {
"version": "3.0.2", "version": "3.0.2",
"resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
@ -1659,6 +1782,21 @@
"is-callable": "^1.1.3" "is-callable": "^1.1.3"
} }
}, },
"node_modules/foreground-child": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.1.1.tgz",
"integrity": "sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==",
"dependencies": {
"cross-spawn": "^7.0.0",
"signal-exit": "^4.0.1"
},
"engines": {
"node": ">=14"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/fraction.js": { "node_modules/fraction.js": {
"version": "4.2.0", "version": "4.2.0",
"resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.2.0.tgz", "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.2.0.tgz",
@@ -1777,19 +1915,21 @@
 "integrity": "sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw=="
 },
 "node_modules/glob": {
-"version": "7.2.3",
-"resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
-"integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
+"version": "10.3.10",
+"resolved": "https://registry.npmjs.org/glob/-/glob-10.3.10.tgz",
+"integrity": "sha512-fa46+tv1Ak0UPK1TOy/pZrIybNNt4HCv7SDzwyfiOZkvZLEbjsZkJBPtDHVshZjbecAoAGSC20MjLDG/qr679g==",
 "dependencies": {
-"fs.realpath": "^1.0.0",
-"inflight": "^1.0.4",
-"inherits": "2",
-"minimatch": "^3.1.1",
-"once": "^1.3.0",
-"path-is-absolute": "^1.0.0"
+"foreground-child": "^3.1.0",
+"jackspeak": "^2.3.5",
+"minimatch": "^9.0.1",
+"minipass": "^5.0.0 || ^6.0.2 || ^7.0.0",
+"path-scurry": "^1.10.1"
+},
+"bin": {
+"glob": "dist/esm/bin.mjs"
 },
 "engines": {
-"node": "*"
+"node": ">=16 || 14 >=14.17"
 },
 "funding": {
 "url": "https://github.com/sponsors/isaacs"
@@ -1806,6 +1946,36 @@
"node": ">= 6"
}
},
"node_modules/glob/node_modules/brace-expansion": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
"integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
"dependencies": {
"balanced-match": "^1.0.0"
}
},
"node_modules/glob/node_modules/minimatch": {
"version": "9.0.3",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz",
"integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==",
"dependencies": {
"brace-expansion": "^2.0.1"
},
"engines": {
"node": ">=16 || 14 >=14.17"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/glob/node_modules/minipass": {
"version": "7.0.4",
"resolved": "https://registry.npmjs.org/minipass/-/minipass-7.0.4.tgz",
"integrity": "sha512-jYofLM5Dam9279rdkWzqHozUo4ybjdZmCsDHePy5V/PbBcVMiSZR97gmAy45aqi8CK1lG2ECd356FU86avfwUQ==",
"engines": {
"node": ">=16 || 14 >=14.17"
}
},
"node_modules/globalthis": { "node_modules/globalthis": {
"version": "1.0.3", "version": "1.0.3",
"resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.3.tgz", "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.3.tgz",
@ -2392,6 +2562,23 @@
"node": ">=6.0" "node": ">=6.0"
} }
}, },
"node_modules/jackspeak": {
"version": "2.3.6",
"resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-2.3.6.tgz",
"integrity": "sha512-N3yCS/NegsOBokc8GAdM8UcmfsKiSS8cipheD/nivzr700H+nsMOxJjQnvwOcRYVuFkdH0wGUvW2WbXGmrZGbQ==",
"dependencies": {
"@isaacs/cliui": "^8.0.2"
},
"engines": {
"node": ">=14"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
},
"optionalDependencies": {
"@pkgjs/parseargs": "^0.11.0"
}
},
"node_modules/jake": { "node_modules/jake": {
"version": "10.8.5", "version": "10.8.5",
"resolved": "https://registry.npmjs.org/jake/-/jake-10.8.5.tgz", "resolved": "https://registry.npmjs.org/jake/-/jake-10.8.5.tgz",
@ -2639,6 +2826,14 @@
"resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz",
"integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==" "integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ=="
}, },
"node_modules/lru-cache": {
"version": "10.1.0",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.1.0.tgz",
"integrity": "sha512-/1clY/ui8CzjKFyjdvwPWJUYKiFVXG2I2cY0ssG7h4+hwk+XOIX7ZSG9Q7TW8TW3Kp3BUSqgFWBLgL4PJ+Blag==",
"engines": {
"node": "14 || >=16.14"
}
},
"node_modules/luxon": { "node_modules/luxon": {
"version": "3.2.1", "version": "3.2.1",
"resolved": "https://registry.npmjs.org/luxon/-/luxon-3.2.1.tgz", "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.2.1.tgz",
@ -3385,6 +3580,29 @@
"resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
"integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw=="
}, },
"node_modules/path-scurry": {
"version": "1.10.1",
"resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.10.1.tgz",
"integrity": "sha512-MkhCqzzBEpPvxxQ71Md0b1Kk51W01lrYvlMzSUaIzNsODdd7mqhiimSZlr+VegAz5Z6Vzt9Xg2ttE//XBhH3EQ==",
"dependencies": {
"lru-cache": "^9.1.1 || ^10.0.0",
"minipass": "^5.0.0 || ^6.0.2 || ^7.0.0"
},
"engines": {
"node": ">=16 || 14 >=14.17"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/path-scurry/node_modules/minipass": {
"version": "7.0.4",
"resolved": "https://registry.npmjs.org/minipass/-/minipass-7.0.4.tgz",
"integrity": "sha512-jYofLM5Dam9279rdkWzqHozUo4ybjdZmCsDHePy5V/PbBcVMiSZR97gmAy45aqi8CK1lG2ECd356FU86avfwUQ==",
"engines": {
"node": ">=16 || 14 >=14.17"
}
},
"node_modules/path-to-regexp": { "node_modules/path-to-regexp": {
"version": "6.2.1", "version": "6.2.1",
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.2.1.tgz", "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.2.1.tgz",
@ -4478,6 +4696,25 @@
"rimraf": "bin.js" "rimraf": "bin.js"
} }
}, },
"node_modules/rimraf/node_modules/glob": {
"version": "7.2.3",
"resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
"integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
"dependencies": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^3.1.1",
"once": "^1.3.0",
"path-is-absolute": "^1.0.0"
},
"engines": {
"node": "*"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/run-parallel": { "node_modules/run-parallel": {
"version": "1.2.0", "version": "1.2.0",
"resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
@ -4636,6 +4873,17 @@
"url": "https://github.com/sponsors/ljharb" "url": "https://github.com/sponsors/ljharb"
} }
}, },
"node_modules/signal-exit": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz",
"integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==",
"engines": {
"node": ">=14"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/simple-concat": { "node_modules/simple-concat": {
"version": "1.0.1", "version": "1.0.1",
"resolved": "https://registry.npmjs.org/simple-concat/-/simple-concat-1.0.1.tgz", "resolved": "https://registry.npmjs.org/simple-concat/-/simple-concat-1.0.1.tgz",
@ -4795,6 +5043,20 @@
"node": ">=8" "node": ">=8"
} }
}, },
"node_modules/string-width-cjs": {
"name": "string-width",
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
"dependencies": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
"strip-ansi": "^6.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/string.prototype.padend": { "node_modules/string.prototype.padend": {
"version": "3.1.4", "version": "3.1.4",
"resolved": "https://registry.npmjs.org/string.prototype.padend/-/string.prototype.padend-3.1.4.tgz", "resolved": "https://registry.npmjs.org/string.prototype.padend/-/string.prototype.padend-3.1.4.tgz",
@ -4848,6 +5110,18 @@
"node": ">=8" "node": ">=8"
} }
}, },
"node_modules/strip-ansi-cjs": {
"name": "strip-ansi",
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
"dependencies": {
"ansi-regex": "^5.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/strip-bom": { "node_modules/strip-bom": {
"version": "3.0.0", "version": "3.0.0",
"resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz",
@ -5315,6 +5589,23 @@
"url": "https://github.com/chalk/wrap-ansi?sponsor=1" "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
} }
}, },
"node_modules/wrap-ansi-cjs": {
"name": "wrap-ansi",
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
"integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
"dependencies": {
"ansi-styles": "^4.0.0",
"string-width": "^4.1.0",
"strip-ansi": "^6.0.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/wrap-ansi?sponsor=1"
}
},
"node_modules/wrappy": { "node_modules/wrappy": {
"version": "1.0.2", "version": "1.0.2",
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
@ -5554,6 +5845,64 @@
"resolved": "https://registry.npmjs.org/@iarna/toml/-/toml-2.2.5.tgz", "resolved": "https://registry.npmjs.org/@iarna/toml/-/toml-2.2.5.tgz",
"integrity": "sha512-trnsAYxU3xnS1gPHPyU961coFyLkh4gAD/0zQ5mymY4yOZ+CYvsPqUbOFSw0aDM4y0tV7tiFxL/1XfXPNC6IPg==" "integrity": "sha512-trnsAYxU3xnS1gPHPyU961coFyLkh4gAD/0zQ5mymY4yOZ+CYvsPqUbOFSw0aDM4y0tV7tiFxL/1XfXPNC6IPg=="
}, },
"@isaacs/cliui": {
"version": "8.0.2",
"resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz",
"integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==",
"requires": {
"string-width": "^5.1.2",
"string-width-cjs": "npm:string-width@^4.2.0",
"strip-ansi": "^7.0.1",
"strip-ansi-cjs": "npm:strip-ansi@^6.0.1",
"wrap-ansi": "^8.1.0",
"wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0"
},
"dependencies": {
"ansi-regex": {
"version": "6.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz",
"integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA=="
},
"ansi-styles": {
"version": "6.2.1",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz",
"integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug=="
},
"emoji-regex": {
"version": "9.2.2",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz",
"integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="
},
"string-width": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz",
"integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==",
"requires": {
"eastasianwidth": "^0.2.0",
"emoji-regex": "^9.2.2",
"strip-ansi": "^7.0.1"
}
},
"strip-ansi": {
"version": "7.1.0",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz",
"integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==",
"requires": {
"ansi-regex": "^6.0.1"
}
},
"wrap-ansi": {
"version": "8.1.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz",
"integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==",
"requires": {
"ansi-styles": "^6.1.0",
"string-width": "^5.0.1",
"strip-ansi": "^7.0.1"
}
}
}
},
"@nodelib/fs.scandir": { "@nodelib/fs.scandir": {
"version": "2.1.5", "version": "2.1.5",
"resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
@ -5577,6 +5926,12 @@
"fastq": "^1.6.0" "fastq": "^1.6.0"
} }
}, },
"@pkgjs/parseargs": {
"version": "0.11.0",
"resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz",
"integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==",
"optional": true
},
"@sindresorhus/slugify": { "@sindresorhus/slugify": {
"version": "1.1.2", "version": "1.1.2",
"resolved": "https://registry.npmjs.org/@sindresorhus/slugify/-/slugify-1.1.2.tgz", "resolved": "https://registry.npmjs.org/@sindresorhus/slugify/-/slugify-1.1.2.tgz",
@ -6273,6 +6628,11 @@
"domhandler": "^4.2.0" "domhandler": "^4.2.0"
} }
}, },
"eastasianwidth": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz",
"integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA=="
},
"ee-first": { "ee-first": {
"version": "1.1.1", "version": "1.1.1",
"resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
@ -6532,6 +6892,19 @@
"rimraf": "^3.0.2" "rimraf": "^3.0.2"
}, },
"dependencies": { "dependencies": {
"glob": {
"version": "7.2.3",
"resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
"integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
"requires": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^3.1.1",
"once": "^1.3.0",
"path-is-absolute": "^1.0.0"
}
},
"rimraf": { "rimraf": {
"version": "3.0.2", "version": "3.0.2",
"resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
@ -6555,6 +6928,15 @@
"is-callable": "^1.1.3" "is-callable": "^1.1.3"
} }
}, },
"foreground-child": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.1.1.tgz",
"integrity": "sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==",
"requires": {
"cross-spawn": "^7.0.0",
"signal-exit": "^4.0.1"
}
},
"fraction.js": { "fraction.js": {
"version": "4.2.0", "version": "4.2.0",
"resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.2.0.tgz", "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.2.0.tgz",
@@ -6632,16 +7014,38 @@
 "integrity": "sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw=="
 },
 "glob": {
-"version": "7.2.3",
-"resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
-"integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
+"version": "10.3.10",
+"resolved": "https://registry.npmjs.org/glob/-/glob-10.3.10.tgz",
+"integrity": "sha512-fa46+tv1Ak0UPK1TOy/pZrIybNNt4HCv7SDzwyfiOZkvZLEbjsZkJBPtDHVshZjbecAoAGSC20MjLDG/qr679g==",
 "requires": {
-"fs.realpath": "^1.0.0",
-"inflight": "^1.0.4",
-"inherits": "2",
-"minimatch": "^3.1.1",
-"once": "^1.3.0",
-"path-is-absolute": "^1.0.0"
+"foreground-child": "^3.1.0",
+"jackspeak": "^2.3.5",
+"minimatch": "^9.0.1",
+"minipass": "^5.0.0 || ^6.0.2 || ^7.0.0",
+"path-scurry": "^1.10.1"
+},
+"dependencies": {
+"brace-expansion": {
+"version": "2.0.1",
+"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
+"integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
+"requires": {
+"balanced-match": "^1.0.0"
+}
+},
+"minimatch": {
+"version": "9.0.3",
+"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz",
+"integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==",
+"requires": {
+"brace-expansion": "^2.0.1"
+}
+},
+"minipass": {
+"version": "7.0.4",
+"resolved": "https://registry.npmjs.org/minipass/-/minipass-7.0.4.tgz",
+"integrity": "sha512-jYofLM5Dam9279rdkWzqHozUo4ybjdZmCsDHePy5V/PbBcVMiSZR97gmAy45aqi8CK1lG2ECd356FU86avfwUQ=="
+}
 }
 },
"glob-parent": { "glob-parent": {
@ -7040,6 +7444,15 @@
"resolved": "https://registry.npmjs.org/iso-639-1/-/iso-639-1-2.1.15.tgz", "resolved": "https://registry.npmjs.org/iso-639-1/-/iso-639-1-2.1.15.tgz",
"integrity": "sha512-7c7mBznZu2ktfvyT582E2msM+Udc1EjOyhVRE/0ZsjD9LBtWSm23h3PtiRh2a35XoUsTQQjJXaJzuLjXsOdFDg==" "integrity": "sha512-7c7mBznZu2ktfvyT582E2msM+Udc1EjOyhVRE/0ZsjD9LBtWSm23h3PtiRh2a35XoUsTQQjJXaJzuLjXsOdFDg=="
}, },
"jackspeak": {
"version": "2.3.6",
"resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-2.3.6.tgz",
"integrity": "sha512-N3yCS/NegsOBokc8GAdM8UcmfsKiSS8cipheD/nivzr700H+nsMOxJjQnvwOcRYVuFkdH0wGUvW2WbXGmrZGbQ==",
"requires": {
"@isaacs/cliui": "^8.0.2",
"@pkgjs/parseargs": "^0.11.0"
}
},
"jake": { "jake": {
"version": "10.8.5", "version": "10.8.5",
"resolved": "https://registry.npmjs.org/jake/-/jake-10.8.5.tgz", "resolved": "https://registry.npmjs.org/jake/-/jake-10.8.5.tgz",
@ -7228,6 +7641,11 @@
"resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz",
"integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==" "integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ=="
}, },
"lru-cache": {
"version": "10.1.0",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.1.0.tgz",
"integrity": "sha512-/1clY/ui8CzjKFyjdvwPWJUYKiFVXG2I2cY0ssG7h4+hwk+XOIX7ZSG9Q7TW8TW3Kp3BUSqgFWBLgL4PJ+Blag=="
},
"luxon": { "luxon": {
"version": "3.2.1", "version": "3.2.1",
"resolved": "https://registry.npmjs.org/luxon/-/luxon-3.2.1.tgz", "resolved": "https://registry.npmjs.org/luxon/-/luxon-3.2.1.tgz",
@ -7769,6 +8187,22 @@
"resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
"integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw=="
}, },
"path-scurry": {
"version": "1.10.1",
"resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.10.1.tgz",
"integrity": "sha512-MkhCqzzBEpPvxxQ71Md0b1Kk51W01lrYvlMzSUaIzNsODdd7mqhiimSZlr+VegAz5Z6Vzt9Xg2ttE//XBhH3EQ==",
"requires": {
"lru-cache": "^9.1.1 || ^10.0.0",
"minipass": "^5.0.0 || ^6.0.2 || ^7.0.0"
},
"dependencies": {
"minipass": {
"version": "7.0.4",
"resolved": "https://registry.npmjs.org/minipass/-/minipass-7.0.4.tgz",
"integrity": "sha512-jYofLM5Dam9279rdkWzqHozUo4ybjdZmCsDHePy5V/PbBcVMiSZR97gmAy45aqi8CK1lG2ECd356FU86avfwUQ=="
}
}
},
"path-to-regexp": { "path-to-regexp": {
"version": "6.2.1", "version": "6.2.1",
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.2.1.tgz", "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.2.1.tgz",
@ -8504,6 +8938,21 @@
"integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==", "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==",
"requires": { "requires": {
"glob": "^7.1.3" "glob": "^7.1.3"
},
"dependencies": {
"glob": {
"version": "7.2.3",
"resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
"integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
"requires": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
"inherits": "2",
"minimatch": "^3.1.1",
"once": "^1.3.0",
"path-is-absolute": "^1.0.0"
}
}
}
},
"run-parallel": {
@@ -8604,6 +9053,11 @@
"object-inspect": "^1.9.0"
}
},
"signal-exit": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz",
"integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw=="
},
"simple-concat": { "simple-concat": {
"version": "1.0.1", "version": "1.0.1",
"resolved": "https://registry.npmjs.org/simple-concat/-/simple-concat-1.0.1.tgz", "resolved": "https://registry.npmjs.org/simple-concat/-/simple-concat-1.0.1.tgz",
@ -8711,6 +9165,16 @@
"strip-ansi": "^6.0.1" "strip-ansi": "^6.0.1"
} }
}, },
"string-width-cjs": {
"version": "npm:string-width@4.2.3",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
"integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
"requires": {
"emoji-regex": "^8.0.0",
"is-fullwidth-code-point": "^3.0.0",
"strip-ansi": "^6.0.1"
}
},
"string.prototype.padend": { "string.prototype.padend": {
"version": "3.1.4", "version": "3.1.4",
"resolved": "https://registry.npmjs.org/string.prototype.padend/-/string.prototype.padend-3.1.4.tgz", "resolved": "https://registry.npmjs.org/string.prototype.padend/-/string.prototype.padend-3.1.4.tgz",
@ -8749,6 +9213,14 @@
"ansi-regex": "^5.0.1" "ansi-regex": "^5.0.1"
} }
}, },
"strip-ansi-cjs": {
"version": "npm:strip-ansi@6.0.1",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
"integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
"requires": {
"ansi-regex": "^5.0.1"
}
},
"strip-bom": { "strip-bom": {
"version": "3.0.0", "version": "3.0.0",
"resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz",
@ -9072,6 +9544,16 @@
"strip-ansi": "^6.0.0" "strip-ansi": "^6.0.0"
} }
}, },
"wrap-ansi-cjs": {
"version": "npm:wrap-ansi@7.0.0",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
"integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
"requires": {
"ansi-styles": "^4.0.0",
"string-width": "^4.1.0",
"strip-ansi": "^6.0.0"
}
},
"wrappy": { "wrappy": {
"version": "1.0.2", "version": "1.0.2",
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",


@@ -25,6 +25,7 @@
 "@toycode/markdown-it-class": "^1.2.4",
 "autoprefixer": "^10.4.13",
 "cssnano": "^6.0.1",
+"glob": "^10.3.10",
 "lodash": "^4.17.21",
 "markdown-it-abbr": "^1.0.4",
 "markdown-it-anchor": "^8.6.6",


@@ -104,7 +104,7 @@ class PostCache {
 const cache = new PostCache();
-module.exports = async function() {
-await cache.fetchLatest();
-return cache;
-}
+// module.exports = async function() {
+// await cache.fetchLatest();
+// return cache;
+// }

src/_data/old_posts.js Normal file

@@ -0,0 +1,46 @@
const postCache = require('./_postData');
const dateSort = (a, b) => new Date(b.date) - new Date(a.date);
const mapComment = comment => {
return ({
author: {
name: comment.author_name,
avatars: comment.author_avatar_urls,
url: comment.author_url,
},
content: comment.content.rendered,
canonical: comment.meta.url,
date: comment.date,
});
}
module.exports = async () => {
// const cache = await postCache();
// const posts = Object.values(cache.posts)
// .sort(dateSort)
// .map(post => ({
// ...post,
// comments: Object.values(post.comments)
// .sort(dateSort)
// .reduce((comments, comment) => {
// if (!comments[comment.type]) {
// comments[comment.type] = [];
// }
// comments[comment.type].push(mapComment(comment));
// return comments;
// }, { like: [], reply: [], repost: [] }),
// tags: post.tags.map(tag => ({
// name: cache.tags[tag].name,
// slug: cache.tags[tag].slug,
// link: cache.tags[tag].link
// }))
// }));
// return posts;
return [];
};


@@ -1,44 +0,0 @@
const postCache = require('./_postData');
const dateSort = (a, b) => new Date(b.date) - new Date(a.date);
const mapComment = comment => {
return ({
author: {
name: comment.author_name,
avatars: comment.author_avatar_urls,
url: comment.author_url,
},
content: comment.content.rendered,
canonical: comment.meta.url,
date: comment.date,
});
}
module.exports = async () => {
const cache = await postCache();
const posts = Object.values(cache.posts)
.sort(dateSort)
.map(post => ({
...post,
comments: Object.values(post.comments)
.sort(dateSort)
.reduce((comments, comment) => {
if (!comments[comment.type]) {
comments[comment.type] = [];
}
comments[comment.type].push(mapComment(comment));
return comments;
}, { like: [], reply: [], repost: [] }),
tags: post.tags.map(tag => ({
name: cache.tags[tag].name,
slug: cache.tags[tag].slug,
link: cache.tags[tag].link
}))
}));
return posts;
};


@@ -2,14 +2,16 @@ const postCache = require('./_postData');
 module.exports = async () => {
-const cache = await postCache();
-return Object.values(cache.tags).map(tag => {
-const posts = Object.values(cache.posts).filter(post => post.tags.includes(tag.id)).sort((a, b) => new Date(b.date).getTime() - new Date(a.date).getTime());
-return {
-...tag,
-posts,
-}
-});
+// const cache = await postCache();
+// return Object.values(cache.tags).map(tag => {
+// const posts = Object.values(cache.posts).filter(post => post.tags.includes(tag.id)).sort((a, b) => new Date(b.date).getTime() - new Date(a.date).getTime());
+// return {
+// ...tag,
+// posts,
+// }
+// });
+return [];
 };


@@ -1,15 +1,11 @@
 ---
 layout: base.njk
 includePrism: true
-eleventyComputed:
-title: "{{ post.title.rendered | safe }}"
-description: "{{ post.yoast_head_json.description }}"
 ---
 <main class="wrapper-md stack-lg">
 <article class="stack-md h-entry">
-<h1 class="p-name"><a href="{{ post.url }}" class="u-url">{{ title | safe }}</a></h1>
+<h1 class="p-name"><a href="{{ url }}" class="u-url">{{ title | safe }}</a></h1>
-<p class="published">Published: <time class="dt-published" datetime="{{ post.date | rfc3339 }}">{{ post.date | dateDisplay }}</time></p>
+<p class="published">Published: <time class="dt-published" datetime="{{ date | rfc3339 }}">{{ date | dateDisplay }}</time></p>
-{{ post.excerpt.rendered | safe }}
 <div class="e-content stack-md">
 {{ content | safe }}
 </div>


@@ -2,8 +2,9 @@
 title: Blog
 layout: base.njk
 pagination:
-data: posts
+data: collections.posts
 size: 5
+reverse: true
 ---
 <main class="wrapper-md stack-lg">
 <h1>Blog posts</h1>
@@ -11,10 +12,10 @@ pagination:
 <ol class="stack-xl" role='list'>
 {% for item in pagination.items %}
 <li class="stack-xs">
-<h2><a href="/post/{{ item.slug }}">{{ item.title.rendered | safe }}</a></h2>
+<h2><a href="{{ item.url }}">{{ item.data.title | safe }}</a></h2>
 <time class="block" datetime="{{ item.date | rfc3339 }}">{{ item.date | dateDisplay }}</time>
-{{ item.excerpt.rendered | safe }}
+<div class="e-content p-summary">{{ item.content | excerpt }}</div>
-<a href="/post/{{ item.slug }}" class="inline-block">Read more</a>
+<a href="{{ item.url }}" class="inline-block">Read more</a>
 </li>
 {% endfor %}
 </ol>


@@ -1,11 +0,0 @@
---
layout: post.njk
pagination:
data: posts
alias: post
size: 1
eleventyComputed:
permalink: "post/{{ post.slug }}/"
---
{{ post.markdown | replace("cms.lewisdale.dev/wp-content/", "cdn.lewisdale.dev/wp-content/") | md | safe }}


@@ -0,0 +1,93 @@
---
title: "Raspberry Pi &#038; Ultrasonic Module &#8211; Calculating Distance in C"
date: 2014-05-23T00:00:00
slug: raspberry-pi-ultrasonic-module-calculating-distance-in-c
---
In this brief article I will be posting the C code I used to measure the distance between the Raspberry Pi and any object in front of the HC-SR04 module. I used this article to show me how to wire up the ultrasonic module using a breadboard. You will also need to download and install wiringPi from the wiringPi website, full details [here](http://wiringpi.com/download-and-install/).
The code is based on the Python script from [this article](http://www.raspberrypi-spy.co.uk/2012/12/ultrasonic-distance-measurement-using-python-part-1/) written by Matt Hawkins.
```c
/*
Measures the distance between the Pi and an object using an ultrasonic module
Original source by Matt Hawkins (09/01/2013, http://www.raspberrypi-spy.co.uk/2012/12/ultrasonic-distance-measurement-using-python-part-1/) in Python
Written in C by Lewis Dale (09/04/2014, http://blog.lewisdale.co.uk)
WiringPi Library by Gordon Henderson (http://wiringpi.com)
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <wiringPi.h>

/*Define the GPIO pins using WiringPi Pin Numbers*/
#define GPIO_TRIGGER 4
#define GPIO_ECHO 5

int main()
{
    /*Set up the Pi for GPIO*/
    wiringPiSetup();

    printf("Ultrasonic Measurement\n");

    while(1)
    {
        double distance;
        int i;
        distance = 0;

        /*Loop 5 times for greater accuracy*/
        for(i = 0; i < 5; i++)
        {
            unsigned int start, end;
            double total;

            /*Set the trig pin to output, and the echo pin to input*/
            pinMode(GPIO_TRIGGER, OUTPUT);
            pinMode(GPIO_ECHO, INPUT);

            /*Make sure the module is not outputting*/
            digitalWrite(GPIO_TRIGGER, LOW);

            /*500ms delay to allow the module to settle*/
            delay(500);

            /*Send a 10us pulse to trigger*/
            digitalWrite(GPIO_TRIGGER, HIGH);
            delayMicroseconds(10);
            digitalWrite(GPIO_TRIGGER, LOW);

            start = millis();

            while(digitalRead(GPIO_ECHO) == 0)
            {
                start = millis();
            }

            while(digitalRead(GPIO_ECHO) == 1)
            {
                end = millis();
            }

            /*Calculate pulse length and convert to seconds*/
            total = (double) (end - start) / 1000;

            /*Distance travelled is time * speed of sound (cm/s)
            Halved as we only want distance to object*/
            total = total * 34300;
            total = total / 2;

            /*Append to total distance*/
            distance += (double) total;
        }

        /*Calculate mean distance*/
        distance = distance / 5;
        printf("Distance: %f\n", distance);
    }

    return EXIT_SUCCESS;
}
```
## Compilation instructions
To compile the script, save it as `usonic.c` and compile using gcc, including the wiringPi library:
```bash
gcc -Wall -o usonic usonic.c -lwiringPi
```
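Note that wiringPi accesses the GPIO registers directly, so the compiled binary will usually need to be run as root, e.g. `sudo ./usonic`.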


@@ -0,0 +1,48 @@
---
title: "Solving 2048 Using A* Search"
date: 2014-05-23T00:00:00
slug: solving-2048-using-a-search
---
One of my recent projects has been to attempt to solve the game 2048 using A* Search - it all started from a bet with my girlfriend about who could get the highest score, and I decided I'd “cheat” and just get my computer to do it for me. It didn't work, she still managed to get to the 2048 tile first.
To start with, I wrote a command-line version of the 2048 game in Java - it was fairly simple, if a little unnecessary, and worked well - I even had a little play of it before implementing the A* algorithm, and it was fairly fun to play. There were no real issues here, just a small amount of confusion about how to implement the “gravity” style of tile movement, but a little thought sorted that one out.
Then it came to actually writing the A* Search. I was lucky, in that I had a template from a previous University assignment to work from. All there really was to do was swap a few classes and methods, and change the heuristics.
## The Heuristic
The heuristic I am using at the minute is a less-than-optimal one, but it was the first one I tried. I was actually quite surprised at how effective it was.
```
(0 - sum of tiles) + solution depth
```
Like I say, this is not optimal, and certainly does not provide the highest-scoring solutions. But it does give fairly high scores, and certainly finds the 2048 tile - and even the 4096 tile.
Other possible heuristics include:
* (0 - score) + solution depth
* Difference between largest tile and 2048 tile
* Mean value of tiles
There are a lot of options, and I have seen some impressive implementations. I look forward to improving this further.
## The Pseudocode
Here's a snippet of pseudocode for the A* algorithm:
```
While queue is not empty
    if game is solved
        print current state
        end running
    else
        get next state from queue
        add children of current state to queue
endwhile
```
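To make that loop concrete, here is a minimal JavaScript sketch of the search described above. It is illustrative rather than the code I actually ran (the real solver was Java): the `sumOfTiles`, `isSolved`, and `children` methods on the state object are assumed, and a re-sorted array stands in for a proper priority queue.

```javascript
// Heuristic from above: (0 - sum of tiles) + solution depth.
// Lower values are explored first.
const heuristic = (state) => (0 - state.sumOfTiles()) + state.depth;

function solve(start) {
  const queue = [start];
  while (queue.length > 0) {
    // get next state from queue
    const current = queue.shift();
    if (current.isSolved()) {
      return current; // the winning state
    }
    // add children of current state to queue
    queue.push(...current.children());
    // keep the most promising states at the front
    queue.sort((a, b) => heuristic(a) - heuristic(b));
  }
  return null; // no solution found
}
```

A real implementation would use a priority queue instead of re-sorting the whole array on every step, but the shape of the search is the same.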
## Screenshots
![Screenshot of the output of the 2048 game, with the text "Solution Found!", and an in-progress 2048 game with a maximum score of 2048](./src/images/2048_29.png)
![Screenshot of the output of the 2048 game, with the text "Solution Found!", and an in-progress 2048 game with a maximum score of 4096](./src/images/4096_28.png)


@@ -0,0 +1,136 @@
---
title: "Clever Image Resizing In Python"
date: 2014-07-23T00:00:00
slug: clever-image-resizing-in-python
---
In the not-too-distant past, I had a bit of trouble with a client who wanted to have images resized on the fly that were of a completely different aspect ratio - i.e. they wanted images that were originally landscape to be square, with no skewing of the image. Now some people will note that that's impossible, because while I'm quite handy with PHP, my class for distorting the laws of Physics still gets odd errors. I ended up turning to a class sourced from [White Hat Web Design](http://www.white-hat-web-design.co.uk/blog/resizing-images-with-php/), which was quite clever and took the largest possible area from the centre of the image that matched the target size ratio, and then just resized that. This meant there was a little cropping, but no skewing, and the client was happy.
More recently, I've decided that it was absolutely necessary to re-write this particular feature in Python, which will allow me to run it from the command-line and cleanly resize batches of images. I can't imagine I'll have much use for this particular script, but someone who takes a lot of pictures may.
And here's an example of how it works:
Original file, 2048x1365 (courtesy of [Charlie Newey](https://assemblyco.de)):
![Full-size photo of a castle](./src/images/aber_castle_32-1024x683.jpg)
![Photograph of the castle, resized to a square and cropped](./src/images/aber_castle-resized_30.jpg)
![The same photograph of a castle but cropped to a banner-shape](./src/images/aber_castle-resized_31.jpg)
Here's the script, available for download [here](https://github.com/LewisDaleUK/PyResize):
```python
#!/usr/bin/python
import PIL
from PIL import Image
import sys
import os

"""
Resizes images to a given size, retaining the target aspect ratio, with no stretching. WARNING: Will crop images
Resizing based on the SimpleImage PHP class by Simon Jarvis
http://www.white-hat-web-design.co.uk/articles/php-image-resizing.php
"""

#Divides two numbers
def divide(x, y):
    return float(x) / float(y)

#Gets the filenames from the command-line arguments
def get_files_from_args():
    files = []
    if len(sys.argv) > 3:
        for args in sys.argv[1:len(sys.argv) - 2]:
            files.append(args)
    return files

#Gets the target size from the command-line arguments
def get_size_from_args():
    sys_len = len(sys.argv)
    return (int(sys.argv[sys_len - 1]), int(sys.argv[sys_len - 2]))

def resize(image, size):
    return image.resize(size, Image.ANTIALIAS)

#Regular resizing - not very good
def resize_images(files, size):
    for file in files:
        try:
            image = Image.open(file)
            filename, ext = os.path.splitext(file)
            image.thumbnail(size, PIL.Image.ANTIALIAS)
            image.save(filename + "-resized" + ext)
        except IOError:
            print "File " + file + " not found"

#Crops an image to a height
def crop_to_height(image, theight, twidth):
    width, height = image.size
    diff = height - theight
    y = diff / 2
    left = 0
    right = width
    bottom = height - y
    top = y
    return image.crop((left, top, right, bottom))

#Crops an image to a width
def crop_to_width(image, theight, twidth):
    width, height = image.size
    diff = width - twidth
    x = diff / 2
    left = x
    right = width - x
    bottom = height
    top = 0
    return image.crop((left, top, right, bottom))

#Resizes an image to a width
def resize_to_width(image, twidth):
    width, height = image.size
    ratio = divide(twidth, width)
    height = height * ratio
    return resize(image, (int(twidth), int(height)))

#Resizes an image to a height
def resize_to_height(image, theight):
    width, height = image.size
    ratio = divide(theight, height)
    width = width * ratio
    return resize(image, (int(width), int(theight)))

#Clean resizing, with no stretching
def resize_super_intelligent(files, size):
    for file in files:
        try:
            image = Image.open(file)
            filename, ext = os.path.splitext(file)
            width, height = image.size
            twidth, theight = size
            w_ratio = divide(twidth, width)
            h_ratio = divide(theight, height)
            if (height * w_ratio) >= theight:
                image = resize_to_width(image, twidth)
                image = crop_to_height(image, theight, twidth)
            elif (width * h_ratio) >= twidth:
                image = resize_to_height(image, theight)
                image = crop_to_width(image, theight, twidth)
            #Force the final image to the exact target size
            image = resize(image, size)
            image.save(filename + "-resized" + ext)
        except IOError:
            print "File " + file + " not found"

resize_super_intelligent(get_files_from_args(), get_size_from_args())
```
Then to run you can just use:
```bash
python resize.py file1.ext file2.ext... height width
```
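For example, `python resize.py aber_castle.jpg 512 512` (the trailing numbers are height, then width) should write a cropped-and-resized copy alongside the original as `aber_castle-resized.jpg` - the filename here is just illustrative.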


@@ -0,0 +1,80 @@
---
title: "Motion Sensitive Image Capturing In Python"
date: 2014-07-28T00:00:00
slug: motion-sensitive-image-capturing-in-python
---
This is quite a nice little script that uses the PyGame library to access the computer's webcam, and then takes a succession of photos, comparing percentage differences between the photos taken to determine whether or not any motion has occurred. If it does detect motion, then it captures 2 photos per second for 30 seconds and saves them to the file system.
While this script uses the PyGame library for image capture, it could easily be modified to use other libraries or image capture methods - such as using the camera module on a Raspberry Pi (see [this](http://www.raspberrypi.org/documentation/usage/camera/python/README.md) guide for instructions)
## Prerequisites
* [PyGame Library](http://www.pygame.org/news.html)
* Python Imaging Library (PIL) - [Pillow](http://pillow.readthedocs.org/en/latest/index.html)
* A webcam with up-to-date drivers installed
## The clever bit
The clever bit wasn't actually my idea - for a while I was struggling for ways to compare still images, until I found [this gem](http://rosettacode.org/wiki/Percentage_difference_between_images#Python) on Rosetta Code that returns a percentage difference between the two images.
So what this little bit of code actually does is:
* Turn the two arrays of image data into one array of tuples, with each tuple representing equivalent pixels in each image
* If the image is grayscale, just goes through each pixel and adds the difference in the numerical values of those pixels together
* If the image is in RGB mode, it does exactly the same but with the values of the different colour bands, instead of just the pixel values
* _ncomponents_ is the number of components in the image (i.e. width × height × number of bands)
* Then get the percentage of “dif” relative to the number of possible colours in the image (255), and divide it by the number of components
It's a little convoluted, but it gives quite a good measure of the difference between the images.
One thing I'm not 100% sure on is the boundary for motion capture - I've set it to a 2.5% difference, which worked well for me, but if you're outdoors it's best to make that higher, to account for all of the extra movement (e.g. wind)
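To put that threshold in perspective: the formula reduces to the mean per-channel change expressed as a percentage of 255, so for a 640x480 RGB capture (ncomponents = 640 × 480 × 3 = 921,600) a 2.5% difference means the average channel value has shifted by roughly 0.025 × 255 ≈ 6.4 levels between frames.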
## The script
Download the script [here](/files/motion_33.py)
```python
import pygame
import pygame.camera as camera
import time
import pygame.image as im
from PIL import Image
from itertools import izip
import os

camera.init()
cam = camera.Camera(camera.list_cameras()[0], (640,480))
cam.start()
size = cam.get_size()

#This code is from Rosetta Code http://rosettacode.org/wiki/Percentage_difference_between_images#Python
def check_images(i1, i2):
    i1 = im.tostring(i1, "RGB")
    i1 = Image.frombytes("RGB", size, i1)
    i2 = im.tostring(i2, "RGB")
    i2 = Image.frombytes("RGB", size, i2)
    pairs = izip(i1.getdata(), i2.getdata())
    if len(i1.getbands()) == 1:
        dif = sum(abs(p1 - p2) for p1, p2 in pairs)
    else:
        dif = sum(abs(c1 - c2) for p1, p2 in pairs for c1, c2 in zip(p1, p2))
    ncomponents = size[0] * size[1] * 3
    return (dif / 255.0 * 100) / ncomponents

while 1:
    i1 = cam.get_image()
    time.sleep(1)
    i2 = cam.get_image()
    dif = check_images(i1, i2)
    if dif > 2.5:
        for x in range(0, 30):
            timestamp = time.strftime("%Y-%m-%d--%H:%M:%S")
            #Save the captured frame via pygame.image (imported as im)
            im.save(cam.get_image(), timestamp + ".jpg")
            time.sleep(0.5)
    time.sleep(1)
```


@@ -0,0 +1,81 @@
---
title: "Using ES6 Generators"
date: 2015-02-26T00:00:00
slug: using-es6-generators
---
I've been having fun using some newer technology recently - namely ECMAScript 6 Harmony (or at least the bits of it that Chrome supports). One particular feature which I've been using has made life quite a lot easier for me - ES6 now supports Generator functions, and they can be used to implement iteration with objects.
## What Are Generators?
If you've never used them before, a Generator is a function that can be used as an iterator - i.e. you can use them in a for-each loop, much like you would an array. The difference between using a Generator and a function that returns an Iterable (e.g. an Array) is the `yield` keyword, which acts like a return statement, except that it returns the next iterable value.
This means that we don't have to have a list of the objects we want to iterate over; the list can be built as you iterate, meaning you only use as much memory as necessary for the loop, instead of the maximum amount needed to store the entire data structure.
Sometimes there are no apparent benefits to using Generators, but if you find yourself traversing a data structure repeatedly, you're going to have a lot of repeated code, and using Generators can save you the hassle of repeating yourself.
Generators aren't unique to ECMAScript - they can be found in many languages. Read about using them in Python [here](https://blog.assemblyco.de/optimising-the-fibonacci-sequence-with-generators/).
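The memory benefit is easiest to see with an unbounded sequence - something you could never hold in an array. This example isn't from the original links, but it's the classic illustration:

```javascript
// An infinite Fibonacci generator: values are produced one at a time,
// so only the two most recent numbers are ever held in memory.
function* fibonacci() {
  let [a, b] = [0, 1];
  while (true) {
    yield a;
    [a, b] = [b, a + b];
  }
}

const fib = fibonacci();
console.log(fib.next().value); // 0
console.log(fib.next().value); // 1
console.log(fib.next().value); // 1
console.log(fib.next().value); // 2
```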
## Using Generators
A Generator function is denoted by `function*`:
```javascript
function* myGenerator() {
    yield 1;
    yield 2;
    yield 3;
}
```
We can either use our Generator as an object:
```javascript
var gen = myGenerator();
console.log(gen.next().value); // 1
console.log(gen.next().value); // 2
console.log(gen.next().value); // 3
```
Or access it as a function:
```javascript
for(var x of myGenerator()) {
    console.log(x);
}

>> 1
>> 2
>> 3
```
But that's not much good to us! We want to be able to overwrite an Object's `Symbol.iterator`, so that we can iterate the object directly.
```javascript
function IterableObj() {
    this.data = [1,2,3,4,5,6,7,8,9,10,11,12]
}

IterableObj.prototype[Symbol.iterator] = function* () {
    for(var x of this.data) {
        yield x * 4;
    }
}

var myObj = new IterableObj();

for(var data of myObj) {
    console.log(data);
}

>> 4
>> 8
>> 12
>> 16
>> ...
```
So those are the basics of using ES6 Generators. Check out some of the links below for further reading!
* [Iteration Protocols - Javascript MDN](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols)
* [function* - Javascript MDN](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/function*)


@@ -0,0 +1,33 @@
---
title: "A new blog"
date: 2021-12-17T00:00:00
slug: a-new-blog
---
It's been a long time since I've had an actively-maintained personal website/blog, but I got a spurt of inspiration after seeing a few other recently-revamped blogs. What better way to celebrate the end of the year than with… a blog?
My intention is to try and write a post on here relatively frequently, but we'll see how that goes as I'm quite out of practice.
## Tech stack
I wanted this website to achieve three things: be of my own design, be easy to update, and be accessible. To that end, I chose a relatively simple tech stack:
- [Eleventy](https://11ty.dev/)
- HTML
- CSS
And that's… just about it. Eleventy gives me more than enough functionality to write simple blog posts in Markdown, convert them to HTML, and display them on a page.
I did start out using [Tailwind](https://tailwindcss.com/), but after a little while and seeing some of the recent discourse around it, I decided I wanted to write all the styles myself from scratch. It was pretty easy to remove Tailwind from the stack, as I hadn't done too much work on it to begin with. Plus, it meant that I could get rid of PostCSS, which was giving me a headache when trying to serve both PostCSS and Eleventy at the same time.
I deployed the site using [Netlify](https://netlify.app/). It was my first time using it, and to be honest I'm pretty impressed by how quickly I was able to get things up and running. It took maybe 3 minutes from signing up to getting a version of the site deployed (pointing the domain took longer thanks to pesky DNS propagation times).
## Accessibility and Performance
I wanted some assurance that my website would be accessible, so I regularly tested my pages with [axe DevTools](https://www.deque.com/axe/devtools/) and Lighthouse in Chrome.
At the time of writing, there are no accessibility issues reported by Axe or Lighthouse, so that's a win!
![Congratulations! Found 0 issues automatically on the page](./src/images/axe-clean-output.png)
If anybody reading this does in fact spot or experience an accessibility issue, [please send me a DM or tweet on Twitter](https://twitter.com/LewisDaleUK).


@@ -0,0 +1,29 @@
---
title: "Year retrospective"
date: 2021-12-28T00:00:00
slug: year-retrospective
---
The last year has been a wild one, with some real highs and lows, so I thought I'd do a short retro on the year and outline some (fairly vague) goals for the next year.
2021 has been both a very short and *extremely* long year. Short, because the first 10 or so weeks of the year I was on parental leave after the birth of my daughter at the end of December '20, and so those first few months are mostly just a blur that doesn't count. Long, because it's been exhausting. I'd argue that 2020 was less exhausting - the COVID and remote working situation was stressful, and the period of adjustment made working effectively quite difficult. However, 2021 saw many of those same struggles continue, coupled with a degree of malaise from almost everybody I encountered - people were *tired* and it really started to show.
In addition to that, some health scares and struggles within my family made it difficult for me to take a break throughout the year - the only significant holiday I took was cancelled as my daughter was hospitalised. On top of the general exhaustion that comes from raising a newborn (and one that is apparently capable of going without sleep). The result was that by around July, I burned out. Hard. It impacted my work, I pulled myself off the project I was working on, and eventually ended up leaving the company I was working at and took a short break. It mostly worked - I feel much better now than I did then, at least, but recurring burnout is something I have to watch for.
## Some wins
But it's not all been doom-and-gloom! I've had some great wins this year that I should definitely celebrate.
To start with, I decided to leave a job that honestly wasn't a good fit. I went into the company with a set of expectations, and in hindsight I think they were in conflict with the expectations that the company had for me. The result being that I was struggling to progress with my career and succeed; recognising that and deciding to leave was definitely the right decision.
Since then, I've decided to start freelancing; I like being able to work in lots of different problem spaces, and this definitely gives me the agency and the freedom to do what I enjoy most. It's been pretty good to begin with - I've got some steady work in now, but I'd love to get some more variety in there.
Somebody that I had been helping to mentor was recently successful in applying for a junior engineering role, and I am absolutely overjoyed for them. I find mentoring really rewarding, and it's always nice to see people achieve that first step into a software engineering career.
## Goals for 2022
My (vague) goals for the next year are:
- Take some time off
- Try and build my freelance business further
- I've got some ideas for products I would also like to build on the side

View File

@ -0,0 +1,68 @@
---
title: "Building a CMS for Eleventy"
date: 2022-10-07T00:00:00
slug: building-a-cms-for-eleventy
---
Three days ago, I tweeted this:
[https://twitter.com/LewisDaleUK/status/1577211142748807168](https://twitter.com/LewisDaleUK/status/1577211142748807168)
I said I wouldn't be writing a CMS for Eleventy. It wasn't going to happen, there's no way. I'm not in the business of reinventing the wheel.
Anyway, here's how I built a (very simple) CMS for an Eleventy site.
## [Why?](https://lewisdale.dev/post/building-a-cms-for-eleventy/#why)
I wanted to build a proof-of-concept for something I'd had in mind a while ago, which was a little application that could build a static web page for a local café, and allow the owners to put together new menus and have them update without any intervention from a developer.
I knew it wasn't hard to use external data sources with Eleventy - this site uses one [to get book information for my reading list](https://lewisdale.dev/post/managing-my-reading-list/). What I wanted to do was seamlessly trigger that build and data retrieval.
Firstly, I considered a different approach: committing files to a Git repository and pushing them. That's fine in theory, but it's very config-heavy, and relies on having an authenticated Github account attached, which isn't ideal. I want to be able to trigger the *actual* build.
## [How?](https://lewisdale.dev/post/building-a-cms-for-eleventy/#how)
At its core, this is just an Express server with an SQLite database and the [Eleventy programmatic API](https://www.11ty.dev/docs/programmatic/). I went with Express because it meant I could keep everything inside Javascript (well, Typescript), meaning I wouldn't have to execute commands from whatever platform I'd written it in - put simply, it makes things slightly easier from a package management perspective.
The flow is actually really simple. Once a user saves a menu, we trigger the Eleventy build in a separate directory. The directory contains a full Eleventy instance; this doesn't rely on the end-user's configuration, as the API means I can inject whatever config I need and leave everything else untouched. This then builds separately, and I can serve the files any way I want.
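For reference, kicking off a build with the programmatic API looks roughly like this - a minimal sketch with made-up paths, rather than the app's actual route handler:
```javascript
// A sketch of triggering an Eleventy build programmatically.
// The paths here are illustrative; the real app reads them from its config.
const Eleventy = require("@11ty/eleventy");

async function rebuildSite() {
	const eleventy = new Eleventy("./eleventy_dir", "./eleventy_dir/_site");
	await eleventy.write(); // runs the build and writes the output files
}
```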
## [Issues encountered](https://lewisdale.dev/post/building-a-cms-for-eleventy/#issues-encountered)
The Eleventy Programmatic API isn't particularly well-documented, so I had to go digging through the code to work out what was going on in some spots. In particular, I'd assumed that the paths I provided for output directories and config files were relative to the input path, but that proved to be false - they're actually relative to the working directory. So while I thought I was looking for `.eleventy.js` in `/eleventy_dir/`, it was actually looking in the directory of the Express app.
This was also true for passthrough copies, which proved to be a slight issue - one of the things I didn't want to do was dictate how the Eleventy site should be configured. In the end, I found a “workaround” (read: horrible hack) that let me override the `eleventyConfig.addPassthroughCopy` function, and make relative paths absolute. Here's the code for it below:
```javascript
new Eleventy(
	this._config.buildDir,
	this._config.outputDir,
	{
		// The third argument is an options object; `config` runs against
		// the loaded Eleventy config before the build starts
		config: (eleventyConfig) => {
			// Keep a bound reference to the original function...
			let addPassthrough = eleventyConfig.addPassthroughCopy.bind(eleventyConfig);
			// ...then override it so relative string paths are made absolute
			eleventyConfig.addPassthroughCopy = (file) => {
				if (typeof file === "string") {
					const filePath = {
						[path.join(this._config.rootDir || "", file)]: file
					};
					return addPassthrough(filePath);
				}
				return addPassthrough(file);
			};

			eleventyConfig.addGlobalData("menus", () => {
				return menus as CollectionItem[];
			});

			return {};
		}
	}
)
```
Like I said, a “workaround”.
## [Final thoughts](https://lewisdale.dev/post/building-a-cms-for-eleventy/#final-thoughts)
So this was a fun little experiment. It's very rough-and-ready and doesn't really do a lot, but it was good to spike out how that might be done. Eagle-eyed observers of the codebase will see that there's lots of boilerplate/half-finished code for other things I was working on. I'm planning on adding more features to the server, and then hopefully building an MVP of the menu application.
I think there are a few use cases for this, but mostly it's a good way to build content-managed websites that are updated relatively infrequently. I think the thing that I like about it is that it is very unprescriptive. Your specific Eleventy configuration isn't important - it adds the data it needs, and then leaves it alone (well, everything except those file paths).
The source for the Express server can be found [on my Github](https://github.com/LewisDaleUK/11ty-building-example/).

View File

@ -0,0 +1,101 @@
---
title: "Handling file uploads and failures with Express"
date: 2022-10-25T00:00:00
slug: handling-file-uploads-and-failures-with-express
---
Another little snippet to remind myself later.
If I want to handle uploading files via a form with `enctype="multipart/form-data"`, I need to include a third-party library. In this case, [Multer](https://npmjs.org/package/multer) is the best choice.
The usage is pretty simple. At its most basic:
```typescript
import express from 'express';
import multer from 'multer';
import path from 'node:path';
const app = express();
const upload = multer({
dest: path.join(".", "public", "uploads")
});
app.post('/upload', upload.array('fieldname'), (req, res, next) => {
// Array of files are stored in req.files
});
```
## [Using it with Middleware](https://lewisdale.dev/post/handling-file-uploads-and-failures-with-express/#using-it-with-middleware)
Multer works well for what it does, but there's one issue with the default usage: if you have middleware that relies on form data, Multer has to run *before* that middleware, or the `multipart/form-data` will not have been parsed. This is an issue when dealing with CSRF, because Multer doesn't do dry-runs - if it runs, the files get uploaded to the server.
### [Option 1: Remove files on error](https://lewisdale.dev/post/handling-file-uploads-and-failures-with-express/#option-1-remove-files-on-error)
The first approach is pretty simple: include middleware after both Multer and your middleware that relies on form data. Check the response status, and if it's an error response (`4xx` or `5xx`), iterate over `req.files` and remove each file manually.
e.g.
```typescript
import fs from 'fs';
const removeFilesOnError = (req: Express.Request, res: Express.Response, next: Express.NextFunction) => {
	if (res.statusCode >= 400) {
		if (req.files) {
			// Multiple uploads: remove every file that was written to disk
			(req.files as Express.Multer.File[]).forEach(file => {
				fs.unlinkSync(file.path);
			});
		} else if (req.file) {
			// Single upload
			fs.unlinkSync(req.file.path);
		}
	}

	// Continue the middleware chain
	next();
};
app.post('/upload', upload.array('fieldname'), myOtherMiddleware, removeFilesOnError, (req, res, next) => {
//
});
```
### [Option 2: Use MemoryStorage, and manually store files](https://lewisdale.dev/post/handling-file-uploads-and-failures-with-express/#option-2-use-memorystorage-and-manually-store-files)
By default, Multer uses `DiskStorage`, which automatically stores the files on your server. There is an option to use `MemoryStorage` instead, which adds a Buffer to each file containing the file contents. We can use this to store our files only once the request has succeeded:
```typescript
const storage = multer.memoryStorage();
const upload = multer({
storage,
});
const saveOnSuccess = (req: Express.Request, res: Express.Response, next: Express.NextFunction) => {
	if (res.statusCode < 400 && req.files) {
		(req.files as Express.Multer.File[]).map(file => {
			// req.container is a Dependency Injection container I've defined separately
			// Substitute this line with any random string generator
			const filename = req.container.get_random_string();
			const storagePath = path.join(".", "public", "uploads", filename);

			// stream.write() returns a boolean, so keep a reference to the
			// stream and end it explicitly rather than chaining
			const stream = fs.createWriteStream(storagePath);
			stream.write(file.buffer);
			stream.end();

			file.path = storagePath;
			return file;
		});
	}

	next();
};
app.post('/upload', upload.array('fieldname'), myOtherMiddleware, saveOnSuccess, (req, res, next) => {
// req.files will contain an array of files where file.path is the saved location
});
```
Of the two approaches I've listed, I think this is the one I prefer. They're both fairly easy to implement, but this one allows me a bit more control over when files get saved - there's no risk of a later error leaving malicious files on my server. The only caveat is that a large number of files (or a few very large files) will cause the application to run out of memory.
A third option could be to allow uploads via DiskStorage, but move them into an isolated storage until it's determined that the request was successful, at which point they can be published.
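That might look something like this sketch, where Multer writes to a hypothetical `quarantine` directory and the directory names are mine:
```javascript
// A sketch of the third option: files land in quarantine first, and are
// only moved into public storage once the request has succeeded.
const fs = require("node:fs");
const path = require("node:path");

const publishOnSuccess = (req, res, next) => {
	if (res.statusCode < 400 && req.files) {
		for (const file of req.files) {
			const target = path.join(".", "public", "uploads", file.filename);
			fs.renameSync(file.path, target); // move out of quarantine
			file.path = target;
		}
	}
	next();
};
```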
Finally, a note to myself for when I inevitably face this issue again in the future:
**Do not attempt to write your own Multipart Form middleware, it won't work and you'll lose at least a full day.**

View File

@ -0,0 +1,25 @@
---
title: "Moving my reading list out of JSON"
date: 2022-10-05T00:00:00
slug: moving-my-reading-list-out-of-json
---
I blogged about [hosting a reading list on this blog](https://lewisdale.dev/post/managing-my-reading-list/) a little while ago. At the time, I decided that storing the books as JSON made sense - and it did. But as the list has grown, albeit not exponentially, it has started to get a bit messy.
So I decided to move the reading list into a SQLite database, which seemed like a sensible choice.
## [Why SQLite?](https://lewisdale.dev/post/moving-my-reading-list-out-of-json/#why-sqlite)
I had two requirements for the list: it should be easy to update, and I should be able to update the blog from the Git repository. That meant an externally-hosted database was a no-go.
SQLite fit both of those criteria. It's a small file format - it's only taking up 16kb in the repo right now. And it's a fully-featured SQL database, which means that not only can I easily update and store the records, I can also use it to draw out some statistics about my reading habits. I might do that in a future blog post, once I've got a few more books on the list.
## [Integrating into Eleventy](https://lewisdale.dev/post/moving-my-reading-list-out-of-json/#integrating-into-eleventy)
This was exactly as painless as creating the original list was. Eleventy doesn't really care what data format I use: I just swapped the part of my data file that used the JSON data source for a function that opens and reads the SQLite database. And that was it. Everything else stayed exactly the same.
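For illustration, the swapped-in data file might look roughly like this - a sketch assuming [better-sqlite3](https://www.npmjs.com/package/better-sqlite3) and a hypothetical `books` table; the real file also fetches data from the OpenLibrary API:
```javascript
// src/_data/books.js - a sketch, not the actual file.
// The table and column names are assumptions for illustration.
const Database = require("better-sqlite3");

module.exports = () => {
	const db = new Database("./src/_data/reading-list.db", { readonly: true });
	const books = db.prepare("SELECT * FROM books ORDER BY date_read DESC").all();
	db.close();
	return books;
};
```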
I've not seen any noticeable hit to build times, largely because reading the data locally isn't the bottleneck - getting data from the OpenLibrary API is.
## [Limits](https://lewisdale.dev/post/moving-my-reading-list-out-of-json/#limits)
For my use, this is absolutely fine. If this were a site where multiple people were creating pull requests and merging code in, I might reconsider it - SQLite files are binaries, so there's no meaningful way for Git to diff them, making merge conflicts very common. But this is my site, and I'll merge whatever I want.

View File

@ -0,0 +1,44 @@
---
title: "Simple CSRF token middleware for express"
date: 2022-10-23T00:00:00
slug: simple-csrf-token-middleware-for-express
---
I've been doing some Express development in Typescript recently, and I realised that there are no well-maintained CSRF libraries for Express anymore. Plus, CSRF is actually quite simple to implement, and it meant I could avoid adding yet another dependency to my project.
Here's the basic implementation. It depends on there already being some form of user session (in my case, it's [express-session](https://www.npmjs.com/package/express-session)):
```typescript
import { Request, Response, NextFunction } from 'express';
export const csrfMiddleware = (req: Request, res: Response, next: NextFunction) => {
const previousCsrf = req.session.csrf;
// I've set up a manual dependency injection container with some middleware
// You can substitute this with whatever library you want to use to
// generate random strings
// In my case this is a wrapper for Node.Crypto.randomBytes
req.session.csrf = req.container.get_random_string(128);
// Make the token available to templates as a variable
res.locals.csrf = req.session.csrf;
if (req.method !== "GET" && (previousCsrf !== req.body["_csrf"] || !previousCsrf)) {
// Unless the request is a GET request, which doesn't use CSRF
// compare the CSRF from the request with the token stored in session
// Send a 403 Forbidden response if they don't match
// or there was no stored csrf
return res.sendStatus(403);
}
// Move to the next function
next();
}
```
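Wiring it up is just a case of registering it after the session and body-parsing middleware - something like this sketch, where the session options are placeholders:
```javascript
// A sketch of mounting the middleware; session options are placeholders.
const express = require("express");
const session = require("express-session");
const { csrfMiddleware } = require("./csrf"); // hypothetical module path

const app = express();
app.use(express.urlencoded({ extended: true })); // needed so req.body["_csrf"] is populated
app.use(session({ secret: "replace-me", resave: false, saveUninitialized: true }));
app.use(csrfMiddleware);
```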
It's a fairly basic implementation, but it hits most of the right notes. The tokens are regenerated on each request, so it's relatively resilient to brute force attacks. It will reject requests if there is no CSRF token in the session, which prevents attacks before a session has been created.
It's got flaws, naturally: the token is tied to a session ID, for one. The tokens are also not encrypted with any sort of secret, so they are, in theory, exposed. But as a first pass it's a decent start. There's also an issue with the back button: a previously-rendered form will hold a stale token once it's been regenerated, so resubmitting it will fail.
The [OWASP CSRF Prevention Cheat Sheet](https://cheatsheetseries.owasp.org/cheatsheets/Cross-Site_Request_Forgery_Prevention_Cheat_Sheet.html) has more information and advice on creating secure CSRF tokens. Some improvements to this approach include: encrypting the token, storing it in a cookie with an expiry, and using SameSite cookie attributes.

View File

@ -0,0 +1,200 @@
---
title: "Adding comments to my blog"
date: 2022-11-05T00:00:00
slug: adding-comments-to-my-blog
---
Yet more Eleventy-related things!
I like to have a central place where people can interact with my posts if they wanted to. Right now, that place is Twitter, but it's sort of imploding at the minute and I'm unsure if I want to keep using it as much moving forward.
So, the easiest thing for me to do is to include a (very basic) comments system! At least then it allows people to leave a quick comment on the page.
**What this does:**
Allows people to leave a comment on a post
**What this doesn't do:**
This isn't going to be a fully-featured comments system to replace something like Disqus - there are no accounts, threads, tagging etc. It's just a simple `display name` + `message` combination.
I tried two approaches to adding comments before settling on the current one.
## Option 1: Using Netlify Forms with Functions
This was the first approach I used, which is called the [Jamstack Comments Engine](https://jamstack-comments.netlify.app). It uses Netlify Forms as a storage solution, which triggers a serverless function when a user comments. It puts the comments into a queue, where they have to be manually approved. Once approved, they go into a different Form storage, which triggers a full build of the site. Whew.
### Pros
* Minimal setup
* Netlify Forms have built-in Spam protection
* Once the page is built there is no impact on performance because everything is static
### Cons
* Netlify Forms free tier only allows 100 submissions/month (and I'm not sure if that's across all forms)
* Can take a few minutes for the site to build & update - very slow feedback for users
* Data is coupled to Netlify
I didn't stick with this for very long purely because I didn't like how long the builds were taking.
## Option 2: Netlify Edge Functions
This was my second choice, and in my opinion works much better. It uses [Netlify Edge Functions](https://docs.netlify.com/edge-functions/overview/) with [Eleventy Edge](https://www.11ty.dev/docs/plugins/edge/) to save and retrieve comments on the edge.
Edge Functions & Eleventy Edge mean you can handle dynamic content, but parse it with Eleventy, and it's injected before the page is served. Provided the connection is fast, it shouldn't give any noticeable slowdown, but means that we're still only serving HTML & CSS.
Edge Functions run in Deno, so they can be written as Javascript or Typescript.
### Data storage
I could still use Netlify Forms for this, and just handle the comment retrieval using Edge functions. But instead, I decided to use [Supabase](https://supabase.com) as a data store for now. This means my data is just in Postgres, which makes migrating much easier. It also has a sizable free tier that should let me handle more comments than this site is ever likely to see.
### Handling submissions
After following the setup instructions in the Eleventy Edge documentation, I was left with a directory called `netlify/edge-functions/`. I added a new file, `comments.ts`, and then added the following:
```typescript
import type { Context } from "https://edge.netlify.com";
import * as supabase from 'https://deno.land/x/supabase_deno/mod.ts';
const sb = new supabase.supabaseClient(
Deno.env.get("DATABASE_URL"),
Deno.env.get("SUPABASE_SERVICE_API_KEY")
);
export default async (request: Request, context: Context) => {
const url = new URL(request.url);
if (url.pathname.includes("/post/") && request.method === "POST") {
// Save the comment
const body = await request.clone().formData();
const data = Object.fromEntries(body);
const comments = sb.tables().get("comments");
const res = await comments.items().add({
name: data.name,
comment: data.comment,
post: url.pathname,
});
return new Response(null, {
status: 302,
headers: {
location: url.pathname,
}
})
}
return context.next();
};
```
All this script is doing is checking that the request was sent to a Blog Post page via a `POST` request. Then it attempts to store the comment in Supabase, and redirects the user back to the Blog Post.
Then in my blog post template, I just need a simple form:
```twig
<form action="{{ page.url }}" method="POST">
<h2>Add a comment</h2>
<label for="name">Your name:</label>
<input type="text" name="name" id="name" />
<label for="comment">Your comment:</label>
<textarea name="comment" id="comment"></textarea>
<button type="submit">Add comment</button>
</form>
```
When a user submits this form, it'll trigger the edge function.
### Displaying comments
I used a second edge function for this, which also uses the EleventyEdge function to allow me to inject some basic filters, data, and templates.
```javascript
import * as supabase from 'https://deno.land/x/supabase_deno/mod.ts';
import {
EleventyEdge,
precompiledAppData,
} from "./_generated/eleventy-edge-app.js";
const sb = new supabase.supabaseClient(
Deno.env.get("DATABASE_URL"),
Deno.env.get("SUPABASE_SERVICE_API_KEY")
);
export default async (request, context) => {
try {
const url = new URL(request.url);
const comments = await sb.tables().get("comments").items().get("post", url.pathname);
let edge = new EleventyEdge("edge", {
request,
context,
precompiled: precompiledAppData,
// default is [], add more keys to opt-in e.g. ["appearance", "username"]
cookies: [],
});
edge.config((eleventyConfig) => {
eleventyConfig.addFilter("json", obj => JSON.stringify(obj, null, 2));
eleventyConfig.addFilter("date", dateStr => new Date(dateStr).toLocaleDateString("en-GB", {
dateStyle: "medium"
}));
eleventyConfig.addGlobalData("comments", comments);
});
return await edge.handleResponse();
} catch (e) {
console.log("ERROR", { e });
return context.next(e);
}
};
```
Again, all this does is load the comments for a Post, and adds them to the globally-available data via `eleventyConfig.addGlobalData`.
Then, I can display comments using the `edge` shortcode:
{% raw %}
```twig
{% edge "njk" %}
<h2>Comments</h2>
<ul>
{% for comment in comments %}
<li>
<strong>{{ comment.name }}</strong> <span>{{ comment.created_at | date }}</span>
<p>{{ comment.comment }}</p>
</li>
{% endfor %}
</ul>
{% endedge %}
```
{% endraw %}
### Routing
Finally, I just needed to enable my edge functions in `netlify.toml`:
```toml
[[edge_functions]]
function = "eleventy-edge"
path = "/post/*"
[[edge_functions]]
function = "comments"
path = "/post/*"
```
And that's... more or less it! This works pretty nicely, but naturally has some flaws. For a start, there's limited-to-no spam protection. There's also no way to verify users are who they say they are. So there are definitely some improvements I'll need to make going forward.
But as a first (well, second) pass, it was a good effort and a nice intro to using Edge Functions.

View File

@ -0,0 +1,98 @@
---
title: "Adding statically-generated Open Graph images"
date: 2022-11-02T00:00:00
slug: adding-statically-generated-open-graph-images
---
Open Graph images are what websites such as Twitter choose to display when showing a preview to a page. There are plenty of ways to generate these dynamically - like using Netlify Edge Functions, for one.
But recently somebody posted a [plugin they'd built](https://github.com/KiwiKilian/eleventy-plugin-og-image) in the Eleventy Discord server, which gives the ability to create (fairly simple) Open Graph images at build time.
## Installation
Installation was pretty straightforward:
```bash
npm i --save-dev eleventy-plugin-og-image
```
```javascript
// .eleventy.js
const fs = require('fs');
const EleventyPluginOgImage = require('eleventy-plugin-og-image');
module.exports = function(eleventyConfig) {
eleventyConfig.addPlugin(EleventyPluginOgImage, {
satoriOptions: {
fonts: [
{
name: 'Pixeboy',
data: fs.readFileSync('./fonts/Pixeboy.ttf'),
weight: 700,
style: 'normal',
},
],
},
});
}
```
## Usage
Next I created a template file that can be converted to an image. It's worth mentioning that the plugin uses Vercel's [Satori](https://github.com/vercel/satori) under the hood, so there are restrictions to the CSS that can be applied. I mostly just followed the example from the documentation for this, with a couple of minor tweaks:
{% raw %}
```twig
<div style="height: 100%; width: 100%; display: flex; align-items: stretch; background-color: white;">
<div style="display: flex; flex-direction: column; flex-grow: 1; align-items: center; margin: 80px; background-color: white;">
<div style="display: flex; flex-grow: 1; flex-direction: column; align-items: center; justify-content: center; font-family: 'Pixeboy'; font-weight: 700; color: black; text-align: center;">
<h1 style="font-size: 80px;">{{ title }}</h1>
<h2 style="font-size: 44px;">{{ subTitle }}</h2>
</div>
<div style="display: flex; justify-self: flex-end; flex-grow: 0; font-family: 'Pixeboy'; font-weight: 700; color: black; text-align: center;">
<h3 style="font-size: 30px;">Lewisdale.dev</h3>
</div>
</div>
</div>
```
{% endraw %}
And then finally, I just needed to include it in my header. I have a computed data variable called `pageTitle` that I use on some pages, so I simply used that plus the page description for my data:
{% raw %}
```twig
{# _includes/layout.njk #}
<head>
...
{% ogImage "./src/og-post.og.njk", { title: pageTitle or title, subTitle: description } %}
</head>
```
{% endraw %}
## Issues
The first issue I encountered was a rather cryptic error message:
```bash
[11ty] 2. (./src/_includes/layout.njk)
[11ty] EleventyShortcodeError: Error with Nunjucks shortcode `ogImage` (via Template render error)
[11ty] 3. Cannot use 'in' operator to search for 'buffer' in undefined (via Template render error)
```
This turned out to just be because I had forgotten to include the font file in my configuration initially, which Satori requires. Once I added it to the configuration, everything worked as expected.
The second issue I had was because this method creates an image for _every_ page on the website - I don't really want that. In particular, this caused issues for pages with permalinks that had no directories, such as my 404 page. The plugin doesn't handle those cases at the moment, and was trying to write to `_site/404.html/og-post.png`.
To fix this, I wrapped my usage in an if statement, so the image is now only generated for blog posts:
{% raw %}
```twig
{# _includes/layout.njk #}
{% if tags and "posts" in tags and not "drafts" in tags %}
{% ogImage "./src/og-post.og.njk", { title: pageTitle or title, subTitle: description } %}
{% endif %}
```
{% endraw %}
Finally, this plugin does no caching currently - that means that the images are generated on every single build. If you've got a lot of pages, this could mean a significantly increased build time, in which case I'd recommend doing this via Edge Functions.

View File

@ -0,0 +1,106 @@
---
title: "FediLeventy Part 2 &#8211; comments and follower lists"
date: 2022-11-09T00:00:00
slug: fedileventy-part-2-comments-and-follower-lists
---
Yes, I'm calling it FediLeventy and there's nothing any of you can do to stop me.
Yesterday, after publishing my [post on implementing some features of ActivityPub](https://lewisdale.dev/post/you-can-be-friends-with-my-blog), I received a comment from [i@social.bennypowers.dev](https://social.bennypowers.dev/@i):
![i@social.bennypowers.dev - 8 Nov 2022 - oo oo oo can we do comments via mastodon replies?](./src/images/mastodon-comment-1024x523.png)
The short answer: No. The long answer: Not yet.
## Yet is now
Given I already had most of the framework in place for handling comments, it was actually really straightforward. A comment is a `Create` activity, and looks something like this (from [the W3C document outlining the spec](https://www.w3.org/TR/activitypub/#object-without-create)):
```json
{
"@context": "https://www.w3.org/ns/activitystreams",
"type": "Create",
"id": "https://example.net/~mallory/87374",
"actor": "https://example.net/~mallory",
"object": {
"id": "https://example.com/~mallory/note/72",
"type": "Note",
"attributedTo": "https://example.net/~mallory",
"content": "This is a note",
"published": "2015-02-10T15:04:55Z",
"to": ["https://example.org/~john/"],
"cc": ["https://example.com/~erik/followers",
"https://www.w3.org/ns/activitystreams#Public"]
},
"published": "2015-02-10T15:04:55Z",
"to": ["https://example.org/~john/"],
"cc": ["https://example.com/~erik/followers",
"https://www.w3.org/ns/activitystreams#Public"]
}
```
It's fairly simple to convert this into a format that I could then display as a comment: I just store the display name for the Actor (i.e. username@domain), a flag to say it's a Mastodon comment, a link to the commenter, and the original reply, plus some extra fields for things like linking back to the original status.
### Comment content
One minor point I had to deal with is that the content field can contain HTML - in fact, all Mastodon replies seem to. I handle this by first adding in line break characters where the `<p>` tags close, and then stripping all other HTML tags:
```javascript
{
comment: body.object.content
.replaceAll('</p>', '\n')
.replace(/<[^>]+>/g, '')
}
```
and then when rendering, I just replace my line break characters with `<br>` tags. It's pretty simple, and definitely imperfect, but works well enough.
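The rendering half is tiny - something like this filter sketch (the filter name is mine, not part of the real codebase):
```javascript
// A sketch of the rendering step: swap the stored line breaks back to <br> tags.
eleventyConfig.addFilter("nl2br", (comment) => comment.replaceAll("\n", "<br>"));
```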
{#% image "./img/mastodon-comment.png", "Blog comments section. Last comment reads: 'I made this comment via Mastodon!'", "mx-auto h-auto" %#}
## Follower lists
This one was much simpler: I want accurate follower counts when users view the blog's profile via their instance. For that to happen, I need the `/followers` endpoint defined.
The JSON looks like this:
```json
{
"@context": "https://www.w3.org/ns/activitystreams",
"id": "https://lewisdale.dev/followers",
"type": "OrderedCollection",
"totalItems": 5,
"first": "https://lewisdale.dev/followers?page=1"
}
```
which I render with a new Edge Function, routed to [https://lewisdale.dev/followers](https://lewisdale.dev/followers). It handles two cases: the above, where there is no page parameter, and a second, where the page is included.
In both cases I load all of my followers from my database, and if there's no page requested I just output the count. If the page has been requested, I paginate the list with pages of 10, and then add next and previous links where appropriate. Here's some example output:
```json
{
"@context": "https://www.w3.org/ns/activitystreams",
"id": "https://lewisdale.dev/followers",
"type": "OrderedCollection",
"totalItems": 5,
"partOf": "https://lewisdale.dev/followers",
"orderedItems": [
"https://dapchat.online/users/lorcol",
"https://dapchat.online/users/charlie",
"https://dapchat.online/users/lewisdaleuk",
]
}
```
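A sketch of that pagination logic might look like this, assuming `followers` is the full list from the database and `page` is 1-indexed; the function name is mine, and I've used `OrderedCollectionPage` as the page type, per the ActivityStreams spec:
```javascript
// Build the paginated followers response described above.
function followersPage(followers, page) {
	const pageSize = 10;
	const start = (page - 1) * pageSize;
	const body = {
		"@context": "https://www.w3.org/ns/activitystreams",
		id: `https://lewisdale.dev/followers?page=${page}`,
		type: "OrderedCollectionPage",
		totalItems: followers.length,
		partOf: "https://lewisdale.dev/followers",
		orderedItems: followers.slice(start, start + pageSize),
	};

	// Only link to pages that actually exist
	if (start + pageSize < followers.length) {
		body.next = `https://lewisdale.dev/followers?page=${page + 1}`;
	}
	if (page > 1) {
		body.prev = `https://lewisdale.dev/followers?page=${page - 1}`;
	}
	return body;
}
```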
## Delete requests
This was an interesting one: I wasn't really expecting to receive Delete requests, but apparently if a user deletes their account, their instance may send one out to every instance it's aware of, essentially asking each to delete any information it holds about that user. In my case, this just means dropping any comments from that actor. Simple enough!
## Next steps
Things I still want to do:
* Track reblogs so that I can attach a "Reblogged n times" field to each post
* Improve formatting of post notifications
* General tidying up and publishing reproducible source
* Handle comment chains gracefully - right now we just ignore them

View File

@ -0,0 +1,79 @@
---
title: "Okay now I actually have Open Graph images"
date: 2022-11-04T00:00:00
slug: okay-now-i-actually-have-open-graph-images
---
Two days ago, I wrote that I had [added statically-generated Open Graph images](https://lewisdale.dev/post/adding-statically-generated-open-graph-images/), and technically this was the truth. With one minor issue: they didn't work on Twitter!
Luckily, the Eleventy twitter account came to the rescue and posted some links to more resources around generating these - particularly, a [post from Zach Leatherman on using Netlify functions to take screenshots](https://www.zachleat.com/web/automatic-opengraph/).
[https://twitter.com/eleven_ty/status/1587821680369569792](https://twitter.com/eleven_ty/status/1587821680369569792)
So, I set to work. I deployed the linked Screenshot function, created a subdomain, and then went to generate a test screenshot and...
## Nothing at all
![Browser window with just the 11ty logo in the middle](./src/images/11ty-screenshot-failure-1024x573.png)
It didn't matter what URL I tried, nothing worked. So I started taking a look at the code to try and work out what was going on. I found [this pull request](https://github.com/11ty/api-screenshot/pull/13) on the api-screenshot repository that attempted to fix the issue by downgrading Node to 12, as Netlify now defaults to Node 16.
This didn't work for me, however, when I applied it to my own instance. In the end, the solution was to update Puppeteer and switch to `@sparticuz/chromium` as a replacement for `chrome-aws-lambda`. After performing that upgrade and deploying, my screenshot service works! I've also written this up as [a pull request on the api-screenshot repository](https://github.com/11ty/api-screenshot/pull/14), but as of the time of writing the build is failing.
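For reference, the swap looks roughly like this - a sketch based on my reading of the `@sparticuz/chromium` README, so treat the option names as approximate:
```javascript
// A sketch of launching Puppeteer with @sparticuz/chromium in place of
// chrome-aws-lambda inside a serverless function.
const chromium = require("@sparticuz/chromium");
const puppeteer = require("puppeteer-core");

async function getBrowser() {
	return puppeteer.launch({
		args: chromium.args,
		defaultViewport: chromium.defaultViewport,
		executablePath: await chromium.executablePath(),
		headless: chromium.headless,
	});
}
```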
## Finally, some images
Okay, my screenshot service was working. I added a quick template that generates a page to screenshot for each post:
{% raw %}
```twig
---
title: Open Graph Blogpost
pagination:
data: collections.visiblePosts
size: 1
alias: post
permalink: "/post/{{ post.data.title | slugify }}/og-image.html"
---
<html>
<head>
<link rel="stylesheet" type="text/css" href="/styles/og-image.css" />
<link rel="stylesheet" type="text/css" href="/styles/cube.css" />
</head>
<body>
<div class="wrapper text-center flow stack-lg h-full">
<h1>{{ post.data.title }}</h1>
<h2>{{ post.data.description }}</h2>
<div class="mt-auto self-end">
<h3>lewisdale.dev</h3>
<p>{{ post.date | mediumDate }}</p>
</div>
</div>
</body>
</html>
```
{% endraw %}
and then added a shortcode for getting the screenshot service URL, and added the relevant meta tags:
```javascript
eleventyConfig.addShortcode('ogImage', page => {
const url = `https://lewisdale.dev${page.url}/og-image.html`;
return `https://screenshots.lewisdale.dev/${encodeURIComponent(url)}/opengraph`
});
```
and then went to test them out and...
## Still nothing?!
For some reason, when I went to share the page on Twitter to test it, it still wasn't picking up the heading images. I verified that it _should_ work using [https://www.opengraph.xyz](https://www.opengraph.xyz), but for whatever reason Twitter didn't like it.
In the end, it came down to re-ordering the meta tags - particularly, moving my page title seemed to have an effect; beforehand it was near the bottom of my `<head>` tag. I'm not entirely sure what fixed it, but now I'm just not going to touch anything, ever, in case it breaks.
My changes to the `api-screenshot` repository haven't yet been merged, so in the meantime if you want to deploy a copy with my fixes, you can by pressing this button:
<a href="https://app.netlify.com/start/deploy?repository=https://github.com/lewisdaleuk/api-screenshot" target="_blank" rel="noreferrer noopener" class="mx-auto mt-size-1">
<img src="./src/images/button.svg" alt="Deploy to Netlify!" class="mx-auto mt-size-1" />
</a>

View File

@ -0,0 +1,31 @@
---
title: "Three weeks of Mastodon"
date: 2022-11-24T00:00:00
slug: three-weeks-of-mastodon
---
We're now just about at the three-week mark since one of my friends jokingly told me I should just self-host a Mastodon instance after I complained about not knowing which one to choose.
Joke's on him, I actually enjoyed it.
## [Making the switch](https://lewisdale.dev/post/three-weeks-of-mastodon/#making-the-switch)
I actually already had an account on [mastodon.social](https://mastodon.social/), that I'd made a few months ago when I first heard about it, but had never really used it. Switching was trivial, it just amounts to a new field in the account's [Actor definition](https://mastodon.social/users/lewisdaleuk.json) - as it happens, you can view the JSON Actor file for any Mastodon account by just adding `.json` to the end of their username. Handy for seeing what they look like if you're having a go.
Once I had that up, I used [fedifinder](https://fedifinder.glitch.me/) to find a few accounts and follow them, and sort of just went from there.
I also set up [Moa Bridge](https://moa.party/) to auto-crosspost between Twitter & Mastodon. Initially this was in both directions as I wasn't 100% sure I was going to like it, but after a short while I changed the settings so that only Mastodon posts got posted to Twitter, not the other way round.
## [Wait, am I a Sysadmin now?](https://lewisdale.dev/post/three-weeks-of-mastodon/#wait-am-i-a-sysadmin-now)
I've got everything running on a single Linode node. That seems fine considering my instance only has 4 accounts, and 3 of them are quiet-to-borderline-inactive. Storage seems to be an issue; I've resorted to fairly aggressive cron jobs that delete media to stop it spiralling. Ideally I need to look into making that more efficient in the long run, or I'll get a big bill in the near future.
## [Using Mastodon](https://lewisdale.dev/post/three-weeks-of-mastodon/#using-mastodon)
In general, I *really* like Mastodon (and the Fediverse in general) as a place to be social. It feels a lot calmer, and I have way more control over who and what I'm able to see - especially as a server admin. There's just so much less *noise* - I'm seeing really interesting posts from people doing cool stuff, and I'm not getting random promoted posts from tech grifters telling me I can learn Javascript in 25 minutes if I just follow their course.
## [Thoughts on the Fediverse and ActivityPub](https://lewisdale.dev/post/three-weeks-of-mastodon/#thoughts-on-the-fediverse-and-activitypub)
This won't come as a shock to anyone who already follows me or has read my other posts, but I think that ActivityPub is cool as hell. I can self-host a website, implement a specific protocol, and any one of these other websites that accept it can not only follow me, but can actively interact with me? It's exciting, and it almost feels like a callback to the earlier days of the internet, before it was taken over by like 4 websites.
I've had a go at implementing a few ActivityPub features on this blog, as well as [starting but nowhere near finishing](https://github.com/lewisdaleuk/slap.git) a full implementation. This is mostly just me playing around with, and getting used to, the concepts behind the protocol. I'm really looking forward to seeing what I, and more importantly other people, can build with it.

View File

@ -0,0 +1,46 @@
---
title: "TIL: Including subfiles of ignored directories"
date: 2022-11-25T00:00:00
slug: til-including-subfiles-of-ignored-directories
---
Okay, that title is a bit of a mess. Here's the problem: you have a directory that looks like this:
```bash
- app.ts
- scripts
- file.sh
- anotherfile.sh
- build.js
- node_modules
```
Previously I didn't want to upload anything in `scripts`; however, now I've added `build.js` and I _would_ like that uploaded.
My `.ignore` looks like this right now:
```ignore
node_modules
scripts/
```
I initially thought that just adding an exception for the file would work:
```ignore
node_modules
scripts/
!scripts/build.js
```
But nope! That doesn't work. At first I thought it was to do with specificity, but reordering had no effect either.
It turns out, what I actually needed was:
```ignore
node_modules
scripts/*
!scripts/
!scripts/build.js
```
This means I exclude everything inside the scripts directory, but it still allows me to explicitly include my `build.js` file.

View File

@ -0,0 +1,175 @@
---
title: "You can be friends with my blog"
date: 2022-11-08T00:00:00
slug: you-can-be-friends-with-my-blog
---
Like almost everyone else it seems, I've [opened a Mastodon account](https://dapchat.online/@lewisdaleuk). When reading about the standard that Mastodon is built on, [ActivityPub](https://www.w3.org/TR/activitypub/), I had an idea: why not try and implement some of it with Netlify functions.
## Resources and being discoverable
To get me started, I read a couple of posts on the Mastodon blog:
* [How to implement a basic ActivityPub server](https://blog.joinmastodon.org/2018/06/how-to-implement-a-basic-activitypub-server/)
* [How to make friends and verify requests](https://blog.joinmastodon.org/2018/07/how-to-make-friends-and-verify-requests/)
First of all, I needed to generate a JSON file for my account, and point to it with a `.well-known/webfinger` file. I generate these after Eleventy has finished building and add them to my `_site` directory:
```javascript
eleventyConfig.on('eleventy.after', () => {
console.log("Writing actor file...");
const actorDef = {
"@context": [
"https://www.w3.org/ns/activitystreams",
"https://w3id.org/security/v1"
],
id: `https://${domain}/${actor}`,
type: "Person",
preferredUsername: username,
inbox: `https://${domain}/inbox`,
summary: "Posts, streamed from @lewisdaleuk@dapchat.online 's blog",
"attachment": [
{
"type": "PropertyValue",
"name": "Website",
"value": "https://lewisdale.dev",
},
],
publicKey: {
id: `https://${domain}/${actor}#main-key`,
owner: `https://${domain}/${actor}`,
publicKeyPem: Buffer.from(process.env.PUBLIC_KEY, 'base64').toString("utf-8"),
}
}
fs.writeFileSync(`${outDir}/${actor}`, JSON.stringify(actorDef));
});
eleventyConfig.on('eleventy.after', () => {
if (!fs.existsSync(`${outDir}/.well-known`)) {
fs.mkdirSync(`${outDir}/.well-known`);
}
const wf = {
subject: `acct:${actor}@${domain}`,
links: [
{
rel: "self",
type: "application/activity+json",
href: `https://${domain}/${actor}`
},
{
rel: "http://webfinger.net/rel/profile-page",
type: "text/html",
href: `https://${domain}/blog`
}
]
};
fs.writeFileSync(`${outDir}/.well-known/webfinger`, JSON.stringify(wf));
});
```
You can see these in action at https://lewisdale.dev/lewisdale and https://lewisdale.dev/.well-known/webfinger.
Once I had these in place, I was able to "discover" my profile from my Mastodon instance, but couldn't follow the profile:
{#% image "./img/lewisdale-mastodon-find.png", "Mastodon search UI. The text '@lewisdale@lewisdale.dev' has been entered and there is one matching result", "mx-auto h-auto" %#}
## Enabling followers
Okay, this was the bit that had me stumped for several days. The flow basically looks like this:
* Receive a "Follow" message from another user (an Actor)
* Send an "Accept" message to that user confirming you've received the request
* Persist the Actor information so that you can notify them of new posts
A Follow message:
```json
{
"@context": "https://www.w3.org/ns/activitystreams",
"id": "a-request-id",
"type": "Follow",
"actor": "https://lewisdale.dev/lewisdale",
"object": "<the actor sending the request>"
}
```
And an Accept message, where `object` is the original Follow message:
```json
{
"@context": "https://www.w3.org/ns/activitystreams",
"id": "https://lewisdale.dev/lewisdale/12345",
"type": "Accept",
"actor": "https://lewisdale.dev/lewisdale",
"object": {
"@context": "https://www.w3.org/ns/activitystreams",
"id": "a-request-id",
"type": "Follow",
"actor": "https://lewisdale.dev/lewisdale",
"object": "<the actor sending the request>"
}
}
```
So, this is all fairly straightforward so far - there are some other bits I've not covered, like verifying the request signature, but for the most part the data is pretty easy to parse.
I wrote this as a [Netlify Edge Function](https://docs.netlify.com/edge-functions/overview/) because it meant I could route requests to `/inbox` straight to it using my `netlify.toml`:
```toml
[[edge_functions]]
function = "inbox"
path = "/inbox"
```
The hard part was signing requests, not because signing with RSA keys is hard, but because Deno (which is the runtime for Netlify Edge Functions) only supports the SubtleCrypto API. The only major difference is that it operates on the raw data for the keyfile, so I couldn't just pass a Base64 encoded PEM file like I can with Node's crypto library.
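For the curious, the gist of it looks something like this sketch, assuming a base64-encoded PKCS#8 private key in a `PRIVATE_KEY` environment variable; the function names are mine:
```javascript
// A sketch of RSA-SHA256 signing with SubtleCrypto in Deno.
function pemToArrayBuffer(pem) {
	const body = pem
		.replace("-----BEGIN PRIVATE KEY-----", "")
		.replace("-----END PRIVATE KEY-----", "")
		.replaceAll("\n", "");
	const raw = atob(body);
	const bytes = new Uint8Array(raw.length);
	for (let i = 0; i < raw.length; i++) bytes[i] = raw.charCodeAt(i);
	return bytes.buffer;
}

async function signString(data) {
	const pem = atob(Deno.env.get("PRIVATE_KEY"));
	const key = await crypto.subtle.importKey(
		"pkcs8",
		pemToArrayBuffer(pem),
		{ name: "RSASSA-PKCS1-v1_5", hash: "SHA-256" },
		false,
		["sign"],
	);
	const signature = await crypto.subtle.sign(
		"RSASSA-PKCS1-v1_5",
		key,
		new TextEncoder().encode(data),
	);
	// Base64-encode the raw signature bytes for the Signature header
	return btoa(String.fromCharCode(...new Uint8Array(signature)));
}
```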
But, once I'd overcome that hurdle (which, to be honest, is a whole other post), Following worked! I also implemented the `Undo` format, which is what a user will send when they want to unfollow you, but other than that I was ready to start accepting followers.
## Publishing posts
The final step was to publish posts. I achieve this with a standard Netlify serverless function, placed in `functions/deploy-succeeded.ts`; Netlify detects it and automatically runs it every time I successfully deploy the blog.
So, for each new post, I send a `Create` message to each follower, which looks a bit like this:
```json
{
"@context": "https://www.w3.org/ns/activitystreams",
"id": "https://lewisdale.dev/lewisdale/an-id",
"type": "Create",
"actor": "https://lewisdale.dev/lewisdale",
"to": ["https://www.w3.org/ns/activitystreams#Public"],
"object": {
"id": "https://lewisdale.dev/post/my-post-url/an-id",
"type": "Note",
"published": "<the post date>",
"attributedTo": "https://lewisdale.dev/lewisdale",
"content": "<p>New post: <post title><br /><a href=\"https://lewisdale.dev/post/my-post-url\">https://lewisdale.dev/post/my-post-url</a></p>",
"to": ["https://www.w3.org/ns/activitystreams#Public"]
},
"cc": ["<follower url>"]
}
```
I also then log a reference to the message with the ID, so that I can send an `Undo` request if I need to delete the post, and so that I can avoid sending the same Post multiple times.
## Data storage
So when developing this, I used Netlify Forms as my data store. This went great until I tried to insert all of my older posts into the forms, which took me to the limit of the free tier (100 submissions/month), and I _think_ got me rate limited.
In the end, as with my [comments system](https://lewisdale.dev/post/adding-comments-to-my-blog/), Supabase was the lowest-barrier way to handle this. The free tier should be plenty of space and bandwidth to handle everything I need.
## Final thoughts
This was a _lot_ of fun to build. It works surprisingly well, and I learned a lot about cryptography along the way, so it's a big win.
I actually really like the idea of ActivityPub, it feels like the way the web was intended to work. I've got some ideas for other things I could build using it, so may end up pumping out some other projects.
I'm going to work on cleaning up the code that runs this, and I'll release it as a repository that can be deployed straight to Netlify. There are also a few other things I'd like to work on - it'd be cool to insert replies to posts as comments, for one. Right now there's no way I can see them unless they happen to turn up on my timeline.
In the meantime, you can now follow my blog through your Mastodon/other ActivityPub instance at `@lewisdale@lewisdale.dev`.

View File

@ -0,0 +1,13 @@
---
title: "2022: A retrospective"
date: 2022-12-17T00:00:00
slug: 2022-a-retrospective
---
Or not.
I started this post intending to write an honest retrospective, in a similar vein to [the one I wrote last year](https://lewisdale.dev/post/year-retrospective). But, honestly, this year has been shit and the thought of writing that post exhausted me.
It's not all been bad, of course: I (finally) got married this year, after several attempts scuppered by the pandemic. I'm continuing to find a niche in work that I'm good at. I started a new job last month and things are going really well there.
But it's been really tough. Hopefully next year will be better.

View File

@ -0,0 +1,126 @@
---
title: "Advent of Code: Day Five"
date: 2022-12-05T00:00:00
slug: advent-of-code-day-five
tags: [advent-of-code-2022]
---
**Spoilers for Advent of Code below**
* [Day four](/post/advent-of-code-day-four)
* [All Advent of Code posts](/blog/category/advent-of-code)
* [Source](https://github.com/lewisdaleuk/advent-of-code-2022)
Today was the first day that I can definitely say that I struggled to get the task done.
## Part one
The brief _sounded_ fairly simple. Given a list of "stacks" of boxes (represented by a character), and a set of instructions (in the format `move n from x to y`), work out what the final configuration would look like.
The input:
```bash
[D]
[N] [C]
[Z] [M] [P]
1 2 3
move 1 from 2 to 1
move 3 from 1 to 3
move 2 from 2 to 1
move 1 from 1 to 2
```
I was planning out my data structures, and this is where I made my first and silliest mistake: I used String for my stacks. "They're just lists of chars", I thought, this should be easy.
Unfortunately, my logic was off and I kept forgetting to reverse my strings, leading to a lot of banging my head against the wall when I came up with this:
```rust
#[derive(Debug, PartialEq, Eq)]
struct Operation {
quantity: usize,
source: usize,
target: usize
}
#[derive(Debug, PartialEq, Eq)]
pub struct Crane {
pub stacks: Vec<String>,
operations: VecDeque<Operation>,
}
impl Crane {
fn operate(&mut self) {
let operation = self.operations.pop_front().unwrap();
let split: String = self.stacks[operation.source - 1].take(operation.quantity).unwrap();
self.stacks[operation.target - 1].insert_str(0, &split);
}
}
```
The `take` function was my first foray into using a `trait` to extend a core type:
```rust
trait Take<T> {
fn take(&mut self, n: usize) -> Option<T>;
}
impl Take<String> for String {
fn take(&mut self, n: usize) -> Option<String> {
if n <= self.len() {
let split = String::from(&self[..n]);
self.replace_range(..n, "");
return Some(split);
}
None
}
}
```
But this didn't work, because my take function took the _top_ off the string, and then replaced it in the original order, so instead of going from:
```bash
P
N Z
C D B
```
to:
```bash
N
C P
P Z
Z D B
```
I was doing:
```bash
N
C
Z
P D B
```
A subtle difference, but very important for the final result. In the end, I updated `operate` to reverse the strings before prepending them to the stack:
```rust
fn operate(&mut self) {
let operation = self.operations.pop_front().unwrap();
let split: String = self.stacks[operation.source - 1]
.take(operation.quantity)
.unwrap()
.chars()
.rev()
.collect();
self.stacks[operation.target - 1].insert_str(0, &split);
}
```
## Part two
Interestingly... Part two's problem was the same, except this time I had to retain the original insertion order of the stack. Well, well, well, look how the tables have turned. My earlier cockup has become my superpower.
All I had to do was revert my little change from earlier, returning `operate` back to its original state, and that was it!
Day 5 was a toughie for me, but mostly because I tried to be clever but really wasn't. Next time, I'll use an actual Stack - even if I need to write the operations for it myself.

View File

@ -0,0 +1,122 @@
---
title: "Advent of Code: Day Four"
date: 2022-12-04T00:00:00
slug: advent-of-code-day-four
tags: [advent-of-code-2022]
---
**Spoilers for Advent of Code below**
* [Day three](/post/advent-of-code-day-three)
* [All Advent of Code posts](/blog/category/advent-of-code)
* [Source](https://github.com/lewisdaleuk/advent-of-code-2022)
Day four was probably the quickest for me to complete - but mostly because the other days had primed me pretty well for this kind of task and I'd been reading about using `HashSet` the night before.
## Part one
Given a set of range pairs, find the total number of pairs in which one is a subset of the other.
So, given:
```bash
2-4,6-8
2-3,4-5
5-7,7-9
2-8,3-7
6-6,4-6
2-6,4-8
```
I'd expect the following outputs:
```bash
false
false
false
true
true
false
```
The standard library makes it really easy to work with both ranges and structs, so first off I split and parse the ranges, and add them to HashSets:
```rust
use std::collections::HashSet;
use itertools::Itertools; // provides collect_tuple

fn parse_range(range: &str) -> HashSet<i32> {
	let pieces: Vec<i32> = range.split('-').map(|r| r.parse::<i32>().unwrap()).collect();
	(pieces[0] .. pieces[1]).collect()
}

fn overlaps(range: &str) -> bool {
	let (a, b) = range.split(',')
		.map(parse_range)
		.collect_tuple()
		.unwrap();

	// the actual check - explained below
	a.is_superset(&b) || b.is_superset(&a)
}
```
Then, I just check if `a` is a superset of `b`, or vice-versa:
```rust
a.is_superset(&b) || b.is_superset(&a)
```
and tests pass! Seems to work. So I read my input file, and then filter it using this function:
```rust
let overlaps: Vec<&str> = input.split('\n').filter(|line| cleaning::overlaps(line)).collect();
println!("Overlaps: {}", overlaps.len());
```
### Except...
It doesn't. My answer was wrong - after a bit of investigation, I found an example that shouldn't have passed:
```bash
50-50,1-49
```
It turns out that in Rust, ranges don't include the upper bound - so the range `50..50` is actually an empty set, and any superset check involving it would pass, because every set contains the empty set.
I updated my `parse_range` code to handle this:
```rust
fn parse_range(range: &str) -> HashSet<i32> {
let pieces: Vec<i32> = range.split('-').map(|r| r.parse::<i32>().unwrap()).collect();
if pieces[0] == pieces[1] {
return HashSet::from([pieces[0]]);
}
(pieces[0] .. pieces[1] + 1).collect()
}
```
and everything passes! On to Part Two
## Part two
Now find the number of entries that simply overlap, rather than being subsets.
This was a one-line change, because `HashSet` also has functions for this. I changed:
```rust
a.is_superset(&b) || b.is_superset(&a)
```
to:
```rust
!a.is_disjoint(&b)
```
And ran it, which turned out to be all that I needed. Day four complete, and if I remember to do tomorrow's, I'll have beaten my own high score.
## Update
After reading [this post from fasterthanli.me](https://fasterthanli.me/series/advent-of-code-2022/part-4), I've updated my parsing code to use `RangeInclusive` rather than just a `Range`. This means that I can get rid of my pre-emptive equality check, as well as the increment on my range:
```rust
fn parse_range(range: &str) -> HashSet<i32> {
let pieces: Vec<i32> = range.split('-').map(|r| r.parse::<i32>().unwrap()).collect();
(pieces[0]..=pieces[1]).collect()
}
```

View File

@ -0,0 +1,11 @@
---
title: "Advent of Code: Day One"
date: 2022-12-01T00:00:00
slug: advent-of-code-day-one
tags: [advent-of-code-2022]
---
It's the first day of Advent of Code! I finally get to continue my long-standing tradition of being really enthusiastic about it for two-or-three days, missing one day, and not going back.
Today wasn't too difficult, as it never is at the start. Once again I decided to use Rust, because I'd like to eventually be something close to competent with it. I spent way longer than intended on the code, absolutely convinced that there was a bug in the question, only to eventually realise that I'd misread it.
You can follow my progress [on Github](https://github.com/lewisdaleuk/advent-of-code-2022).

View File

@ -0,0 +1,89 @@
---
title: "Advent of Code: Day Six"
date: 2022-12-06T00:00:00
slug: advent-of-code-day-six
tags: [advent-of-code-2022]
---
**Spoilers for Advent of Code below**
* [Day five](/post/advent-of-code-day-five)
* [All Advent of Code posts](/blog/category/advent-of-code)
* [Source](https://github.com/lewisdaleuk/advent-of-code-2022)
Day six was a nice, welcome break from the struggle I had yesterday.
## Part one
Given a set of characters relaying a message, find the index of the packet start signal, which comes after the first four unique characters.
So if my input is:
```txt
mjqjpqmgbljsphdztnvjfqwrcgsmlb
```
then my output should be `7`, because that's where the first run of four unique characters - `jpqm` - ends.
My first attempt was also my last attempt, as I used a `HashSet` of characters from a moving 5-char block. If the length was 5, that meant I'd found the index of my packet marker.
```rust
pub fn find_marker_index(input: &str) -> Option<usize> {
let mut set: HashSet<char> = HashSet::new();
for idx in 0..input.len() {
set = HashSet::from_iter(input[idx..=idx+4].chars());
if set.len() == 5 {
return Some(idx+4);
}
}
None
}
```
## Part two
Much the same, except now we need to also find a message start signal, which comes after `14` unique characters. I updated my `find_marker_index` function to take an offset, and updated the logic to use it. I also reversed my range, because I realised that I would quite easily hit string overflow issues - it was simply luck that I hadn't yet:
```rust
pub fn find_marker_index(input: &str, marker_offset: usize) -> Option<usize> {
let mut set: HashSet<char>;
for idx in marker_offset..input.len() {
set = HashSet::from_iter(input[idx-marker_offset..idx].chars());
if set.len() == marker_offset {
return Some(idx);
}
}
None
}
```
As a final pass, I moved my function to a `trait` implemented on `str`, because I'm trying to learn how to properly work with them:
```rust
pub trait FindUnique {
fn find_unique(&self, offset: usize) -> Option<usize>;
}
impl FindUnique for str {
fn find_unique(&self, offset: usize) -> Option<usize> {
for idx in offset..self.len() {
let len = HashSet::<char>::from_iter(self[idx - offset..idx].chars()).len();
if len == offset {
return Some(idx);
}
}
None
}
}
```
Which means I can now just call my function straight from reading the file:
```rust
if let Some(idx) = read_file(...).unwrap().find_unique(14) {
// Output
}
```
And that worked! Day six checked off, with a nice tidy solution that I'm very happy with.

View File

@ -0,0 +1,44 @@
---
title: "Advent of Code: Day Three"
date: 2022-12-03T00:00:00
slug: advent-of-code-day-three
tags: [advent-of-code-2022]
---
**Spoilers for Advent of Code below**
* [Day two](/post/advent-of-code-day-two)
* [All Advent of Code posts](/blog/category/advent-of-code)
* [Source](https://github.com/lewisdaleuk/advent-of-code-2022)
Day three, checked off ✅. I'm rapidly closing in on a high score here (although that's only Day 4, so it's a very low bar).
This wasn't too much of a challenge either, the `IterTools` crate gave me some useful helpers that made parsing the data slightly easier, and `HashMaps` means I could pretty quickly construct a unique set of data.
To calculate the score, I was lazy. I constructed a string containing every character `a-zA-Z`, and just used the index of a character + 1:
```rust
fn score_char(character: &char) -> i32 {
let mut priorities = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ".chars();
(priorities.position(|c| c == *character).unwrap() as i32) + 1
}
```
Dead simple, and works well. It will break dramatically if an unexpected character is entered (well, it'll panic, anyway). Part two was more-or-less the same, I just needed to write some extra code to produce chunks of data (spoiler: I used `chunks_exact`):
```rust
pub fn group_and_score(inputs: Vec<String>) -> i32 {
inputs
.chunks_exact(3)
.map(|chunk| {
ElfGroup::from_lines([
String::from(&chunk[0]),
String::from(&chunk[1]),
String::from(&chunk[2]),
])
.score
})
.sum()
}
```
I'm quite enjoying this so far, let's see what Day 4 brings.

View File

@ -0,0 +1,123 @@
---
title: "Advent of Code: Day Two"
date: 2022-12-02T00:00:00
slug: advent-of-code-day-two
tags: [advent-of-code-2022]
---
**Spoilers for Advent of Code below**
* [Day one](/post/advent-of-code-day-one)
* [All Advent of Code posts](/blog/category/advent-of-code)
* [Source](https://github.com/lewisdaleuk/advent-of-code-2022)
Day two is done! Again, as expected this was pretty simple - construct a basic Rock, Paper, Scissors game from an esoteric strategy guide and then calculate the score. Do these elves ever actually work?
The strategy guide looks like this:
```bash
A Y
B X
C Z
```
Column A maps to the other player's move (A -> Rock, B -> Paper, C -> Scissors), while column B maps to your move.
## Implementing part one
To begin with, we assume that X, Y, and Z map directly to moves, much in the same way that they do for column A. We then use that to calculate a score. First, our move is given a rating (1 for Rock, 2 for Paper, 3 for Scissors). Then we get a score based on the outcome of the game - a win is 6 points, a loss 0, and a draw 3.
Pretty simple stuff. I used an enum to represent a move, and a struct for rounds:
```rust
pub enum Moves {
Rock,
Paper,
Scissors
}
#[derive(Debug, PartialEq, Eq)]
pub struct Round {
pub first: Moves,
pub second: Moves
}
```
I then map my columns to moves, using simple `match` statements:
```rust
let their_move = match column_a {
    "A" => Moves::Rock,
    "B" => Moves::Paper,
    "C" => Moves::Scissors,
    _ => panic!("Unrecognised move"),
};
let my_move = match column_b {
    "X" => Moves::Rock,
    "Y" => Moves::Paper,
    "Z" => Moves::Scissors,
    _ => panic!("Unrecognised move"),
};
```
Assign a shape score, again using a match:
```rust
let shape_score = match shape {
Moves::Rock => 1,
Moves::Paper => 2,
Moves::Scissors => 3,
};
```
Finally, I need to work out the outcome, get the score for it, and add it to the shape score. I'm using nested matches for this, but there's undoubtedly a better way to handle it.
```rust
let game_score = match their_move {
Moves::Rock => {
match my_move {
Moves::Rock => 3, // Draw
Moves::Paper => 6, // Win
Moves::Scissors => 0, // Loss
}
},
Moves::Paper => {
            match my_move {
                Moves::Rock => 0,
                Moves::Paper => 3,
                Moves::Scissors => 6,
            }
},
Moves::Scissors => {
match my_move {
Moves::Rock => 6,
Moves::Paper => 0,
Moves::Scissors => 3,
}
}
};

game_score + shape_score
```
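For illustration, matching on a tuple of both moves would flatten the nesting - a sketch of the alternative, not what I actually committed:
```rust
// Score the round by matching on both moves at once.
let game_score = match (&their_move, &my_move) {
    (Moves::Rock, Moves::Paper)
    | (Moves::Paper, Moves::Scissors)
    | (Moves::Scissors, Moves::Rock) => 6, // win
    (Moves::Rock, Moves::Rock)
    | (Moves::Paper, Moves::Paper)
    | (Moves::Scissors, Moves::Scissors) => 3, // draw
    _ => 0, // loss
};
```
I stuck with the nested version for now, though.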
Aaand it worked - after running it against my puzzle input and summing the results, I got my first star.
## Part two
Part two was much quicker - the only rule change was that instead of directly translating to a move, the second column indicated the expected outcome of the match: X meant I had to lose, Y a draw, and Z a win. That was easy enough: I just had to change the logic where I map `my_move` from a `str`:
```rust
let my_move = match column_b {
"X" => match their_move {
Moves::Rock => Moves::Scissors,
Moves::Paper => Moves::Rock,
Moves::Scissors => Moves::Paper
}
"Y" => first,
"Z" => match first {
Moves::Rock => Moves::Paper,
Moves::Paper => Moves::Scissors,
Moves::Scissors => Moves::Rock
},
_ => Moves::Rock
};
```
The rest of the logic stays the same, so no changes needed.

View File

@ -0,0 +1,27 @@
---
title: "Choosing a CMS to use with Eleventy"
date: 2022-12-29T11:37:59
slug: choosing-a-cms-to-use-with-eleventy
---
After my last post, I decided that I would in fact start using a CMS, so I took a look at some of the options available to me. My criteria for choosing a CMS were:
- Lightweight
- Easy to setup
- Has markdown editing
- Works with Eleventy
- Can be accessed from devices that aren't my laptop
The first CMS I came across was one called [Strapi](https://strapi.io/) that looked pretty intriguing, purely because of the configuration capabilities. It looks pretty powerful, and I suspect would be very useful if I was making more complex content than just blog posts. But the setup for it looked a bit more complicated than I have the patience for.
I also considered Wordpress because, to be honest, it's the de-facto standard for a reason. I decided against it purely because the headache of setting it up to generate an Eleventy site was a bit too much for right now.
In the end, I settled on [Netlify CMS](https://netlifycms.org/). It's basically a fancy frontend for git - you configure it with your repository, tell it what your content types should look like (e.g. what data should live in the front matter), and it commits files to the repo for you. It was pretty simple to set up; I took some config pointers from [this starter by @surjithctly](https://github.com/surjithctly/neat-starter).
Let's see how it goes - I might end up moving to Wordpress in the end regardless, or there are platforms like [Contentful](https://contentful.com/), but I like that this is just committing to my repository, so in the end all the files and data are still under my control.
**n.b.** I've attempted to configure syndication via IFTTT and Webmentions, so *hopefully* this will get published shortly after I post it
## Update
It turns out that Netlify CMS isn't very well supported anymore - in particular, it's still not mobile-friendly, despite having an open issue for it for 5 years. I'm going to keep an eye out for an alternative.

View File

@ -0,0 +1,11 @@
---
title: "Get your Eleventy site onto the Fediverse"
date: 2022-12-12T00:00:00
slug: get-your-eleventy-site-onto-the-fediverse
---
A little while ago, [I brought my blog to the Fediverse](https://lewisdale.dev/post/you-can-be-friends-with-my-blog). This was a fun experiment which resulted in me turning my Netlify-hosted blog into an instance.
Now, you can do (some of that) too! I've built an Eleventy plugin: [eleventy-plugin-activity-pub](https://www.npmjs.com/package/eleventy-plugin-activity-pub). You can include this in your `.eleventy.js` file to generate a simple actor & webfinger JSON file that will make your website discoverable as a Fediverse user.
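Wiring it up is only a couple of lines in the Eleventy config. As a rough sketch - the option names here are placeholders, so check the plugin README for the real ones:
```javascript
const activityPub = require("eleventy-plugin-activity-pub");

module.exports = function (eleventyConfig) {
  // Option names are illustrative - see the README for the actual keys
  eleventyConfig.addPlugin(activityPub, {
    domain: "lewisdale.dev",
    username: "lewisdale",
  });
};
```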
It's early days for the plugin, but the source is available [on Github](https://github.com/lewisdaleuk/eleventy-plugin-activity-pub) and Pull Requests are absolutely welcome. I'd like to expand the functionality to provide facilities for integrating with Mastodon's API for commenting & publishing, as well as allowing you to act as an alias for an existing user - but what that will look like I'm not sure; possibly a separate plugin.

View File

@ -0,0 +1,29 @@
---
title: "Managing my reading list with BookWyrm"
date: 2022-12-16T00:00:00
slug: managing-my-reading-list-with-bookwyrm
---
A little while ago [I wrote about managing my reading list using a JSON file](/post/managing-my-reading-list). Then I decided that was too _easy_, so I [started using an SQLite database instead](/post/moving-my-reading-list-out-of-json).
Moving my reading list to SQLite was a mistake, to be frank. Data is spread across three tables, and there's no easy way to reference the schema while I'm adding new entries, as I mostly do it via the sqlite3 CLI.
I do have a Goodreads account, however there's no public API to access, so I can't (easily) use it as a data source.
## Enter BookWyrm
BookWyrm ticks three boxes for me:
1. it's easy to use
2. it's part of the Fediverse
3. it has an RSS feed
So, I've started moving my reading list out of my SQLite database, and into BookWyrm. And to make it easier, I've written a plugin for Eleventy.
[eleventy-plugin-bookwyrm](https://www.npmjs.com/package/eleventy-plugin-bookwyrm) is a pretty simple Eleventy plugin. If you give it your BookWyrm instance URL and handle, it'll fetch and parse the RSS feed, and add it to Eleventy global data for you, so that it can be used in templates.
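Setup follows the usual Eleventy plugin pattern - something like this, though I'm sketching the option names from memory, so treat the README as the source of truth:
```javascript
const bookwyrm = require("eleventy-plugin-bookwyrm");

module.exports = function (eleventyConfig) {
  // Instance URL and handle - option names approximate
  eleventyConfig.addPlugin(bookwyrm, {
    instance: "bookrastinating.com",
    user: "lewisdaleuk",
  });
};
```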
I've not finished moving my list over yet, but once I do, I'll be switching over my [reading list](/reading-list) and using this as a data source from now on. You can also follow my reading at [@lewisdaleuk@bookrastinating.com](https://bookrastinating.com/users/lewisdaleuk).
### Update: I hit a snag!
It turns out that BookWyrm only allows up to 10 items in an RSS feed, with no pagination! Luckily, I then remembered outboxes - every profile has a public outbox that exposes its public feed. I've updated the plugin to use this instead, which works nicely. I've lost some of the dates on my reading list, but I guess that's a small price to pay.
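For the curious, fetching an outbox is just a couple of JSON requests. A rough sketch - the URL shape follows my profile URL above and the ActivityPub spec, but BookWyrm's exact paths and paging may differ:
```javascript
// Fetch the most recent public activities from an ActivityPub outbox
const getOutbox = async (instance, user) => {
  const headers = { Accept: "application/activity+json" };
  const collection = await (
    await fetch(`https://${instance}/users/${user}/outbox`, { headers })
  ).json();

  // An outbox is an OrderedCollection; `first` points at the newest page
  const page = await (await fetch(collection.first, { headers })).json();
  return page.orderedItems;
};
```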

View File

@ -0,0 +1,60 @@
---
title: "Microblogging with Eleventy"
date: 2022-12-30T21:24:54
slug: microblogging-with-eleventy
---
Given the drive to move all of my content into one place and syndicate it to other networks, I decided that I'd also try out doing microblog-style posts with Eleventy. Before I could do that, I needed to add a CMS (there's no way I'm manually adding Markdown files every time I want to post a status).
Once that was done, I added a new collection for Microblog posts, which are just text fields with a posting datetime - no title, or any of the other frontmatter data that I'd normally add to a full blog post.
I also modified Netlify CMS to enable a max length on textarea fields - Mastodon Toots are 500 characters, so that's where I drew the line.
Finally, I created a new [RSS feed](https://lewisdale.dev/micro/feed.xml) for my microblog posts - this will also be important later when I want to publish to other platforms.
## Syndicating
I've already added [Webmentions](https://indieweb.org/Webmention) to my website, which allow me to send and receive certain types of interactions from other websites. These map pretty nicely to certain social media interactions, like replies, reblogs, and likes.
To start with, I need to be able to send out Webmentions when they're included. To do this, I use [Webmention.io](https://webmention.io), which provides me a webhook I can call. Then, I use [IFTTT](https://ifttt.com) to trigger the webhook when it detects a new RSS feed item.
The final step is to use [Bridgy](https://brid.gy) to handle cross-posting. This is done by including the webmention syndication URL in the post body as an invisible anchor. For cross-posting to work, I need to mark up my post using [Microformats](https://indieweb.org/microformats).
For blog posts, this means adding `h-entry` with `e-content` and `p-name` tags. Bridgy will detect these, determine that it's an article, and cross-post just the article title and a link.
{% raw %}
```twig
<article class="h-entry">
<h1 class="p-name">{{ title }}</h1>
<div class="e-content">
{{ content | safe }}
</div>
<div class="syndication">
<a href="https://brid.gy/publish/mastodon"></a>
</div>
</article>
```
{% endraw %}
For microblog posts, this is slightly different. Bridgy assumes that a post is an article if it contains a `p-name` tag, so I omit that. In its place I include the timestamp, which is slightly more important for these:
{% raw %}
```twig
<article class="h-entry">
<time class="dt-published">{{ date | microDate }}</time>
<div class="flow e-content">
{{ content | safe }}
</div>
<div class="syndication">
<a href="https://brid.gy/publish/mastodon"></a>
</div>
</article>
```
{% endraw %}
## Next steps
This works reasonably well, although there's a fairly large delay between publishing on my site and syndicating across to different platforms. That's mostly because there are several different intermediaries that have to be triggered in turn (IFTTT -> Webhooks -> Webmention -> Brid.gy -> Mastodon). In fairly short order I'd like to replace at least some of this with my own code. I already use post-deploy Netlify functions to send ActivityPub messages, so I may as well use them for other syndication too.
I also want to improve some of the markup on my microblog posts, and add a proper feed to my home page. But that'll also come with a bit of a site redesign, because I'm getting bored of this one.

View File

@ -0,0 +1,11 @@
---
title: "Taking a break"
date: 2022-12-23T00:00:00
slug: taking-a-break
---
It's my last day of work for 2022 - I'm off from today until the 3rd of January, which will be my longest break since my honeymoon. I'm looking forward to taking some time off: we've got some Christmassy things planned for tomorrow, and it's my daughter's second birthday in between Christmas and New Year's Day (and I'm very excited for her to get the present we bought her).
Hopefully this will mean I get some time to relax and recuperate a bit. I'm going to *try* and avoid doing any code-related things, but we'll see how well that goes. I'm also hoping to get out for some longer bike rides as I won't have my commute to provide me with the exercise.
Anyway, merry Christmas to those who celebrate, and happy new year to anyone reading.

View File

@ -0,0 +1,11 @@
---
title: "Thinking about the web"
date: 2022-12-28T00:00:00
slug: thinking-about-the-web
---
I've been seeing some good posts recently, like these ones from [Andy Bell](https://andy-bell.co.uk/bring-back-blogging/), [Chris Coyier](https://chriscoyier.net/2022/12/26/bring-back-blogging/), and [Sophie Koonin](https://localghost.dev/blog/building-a-website-like-it-s-1999-in-2022/), about using a blog as a real “base” for your place on the web, and then following the [POSSE principle](https://indieweb.org/POSSE) for everywhere else.
I like that idea - this is the one part of the web I have the most control over. It's already partially-federated (with some improvements coming this way in the near future). I just need to set up some more syndication tools using IFTTT, and then I think I'll be good to go.
I'd like to add a second post format too, for shorter-form posts that I'd normally have written for Twitter - as well as making it a bit easier to publish content. But that means getting a CMS of some description, so it might take me a little while.

View File

@ -0,0 +1,109 @@
---
title: "Adding categories to Eleventy"
date: 2022-05-28T00:00:00
slug: adding-categories-to-eleventy
---
I've decided to put a bit more love into this blog - I've neglected it since the new year. As part of that, I wanted to make a few more changes: namely, some better navigation to allow me to write different types of content. So, I've added a little category list to the site to allow people to browse posts by tag.
## Organising posts
First of all, to identify blog posts, I use a single tag: `posts`. I use an `11tydata.json` file in my posts directory that looks like this to ensure every post is automatically tagged correctly:
```json
{
"tags": [
"posts"
],
"permalink": "post/{{ title | slug }}/",
"layout": "post.njk"
}
```
I also have a `draft` tag that I use to un-publish posts I'm working on, without needing to keep WIP on one machine. I'll assume that any other tag on an item in the `posts` collection is its category, and that a post can have multiple categories.
## Getting the category list
So, to generate a list of categories and the number of posts in each category, I've added a [simple custom collection](https://www.11ty.dev/docs/collections/#collection-api-methods) to my site, called `categories`. Here's the code:
```javascript
eleventyConfig.addCollection('categories', (collectionApi) => {
const posts = collectionApi
.getFilteredByTag("posts")
.filter(p => !p.data.tags.includes("draft"));
return posts.reduce((tags, post) => {
post.data.tags.filter(tag => tag !== 'posts').forEach(tag => {
if (!tags[tag]) {
tags[tag] = 0;
}
tags[tag]++;
});
return tags;
}, {"All posts": posts.length})
});
```
It's fairly simple, even if Javascript's `reduce` is a callback-headache. All we're doing is getting all of the items in the `posts` collection, removing anything tagged as a draft post, and then, for each tag, checking whether the tag already exists in our accumulator. If it doesn't exist, we initialise it with a count of 0. Then, we increment the tag count by 1. The accumulator is seeded with an extra entry, `All posts`, set to the total number of items in the `posts` collection.
The output of this function is an object that looks like this:
```javascript
{
"All posts": 10,
"frontend": 3,
"backend": 2,
"recipes": 4,
"books": 1
}
```
## Displaying categories
Listing the categories is easy - we just need to use our new collection:
{% raw %}
```html
<ul>
{% for category, count in (collections.categories) %}
{% if category == "All posts" %}
<li><a href="{{ '/blog' | url }}">{{ category }} ({{ count }})</a></li>
{% else %}
{% set caturl = ["/blog", "category", category] | join("/") %}
<li><a href="{{ caturl | url }}">{{ category }} ({{ count }})</a></li>
{% endif %}
{% endfor %}
</ul>
```
{% endraw %}
To actually display a category, [Eleventy has an easy guide for this](https://www.11ty.dev/docs/quicktips/tag-pages/). We just need a bit of customisation to use our blog layout, filter out tags such as drafts, and the categories themselves, and then set our permalink:
{% raw %}
```yaml
---
pagination:
data: collections
size: 1
alias: tag
filter:
- draft
- categories
- all
permalink: /blog/category/{{ tag }}/
layout: blog.njk
eleventyComputed:
pageTitle: Posts Tagged "{{ tag }}"
title: Lewis Dale's Blog
---
{% set taglist = collections[ tag ] %}
{% for post in (taglist | filterDrafts | sortedByDate) %}
{% include "components/blogpost.njk" %}
{% endfor %}
```
{% endraw %}
And that's pretty much it! There's probably still some work to be done with paginating the tags once I have enough posts to need it.

View File

@ -0,0 +1,81 @@
---
title: "Managing my reading list"
date: 2022-06-12T00:00:00
slug: managing-my-reading-list
---
A little while ago I was toying with building a lightweight web app that would make it easy to manage and share reading lists. I never got around to making it, but instead I built a very-MVP version by adding my [reading list](/reading-list) to this website. It was a fairly simple construction: I added a file called `reading-list.11tydata.json` to my source directory that looked like this:
```json
{
"books": [
{
"title": "Eyes of the Void",
"author": "Adrian Tchaikovsky",
"goodreads": "https://www.goodreads.com/book/show/58950674-eyes-of-the-void",
"status": "completed",
"completed_date": "25/05/22"
},
{
"title": "The Player of Games",
"author": "Iain M. Banks",
"goodreads": "https://www.goodreads.com/book/show/18630.The_Player_of_Games",
"status": "started"
    }
]
}
```
Then, on my reading list page I could simply iterate over the `books` variable and output the data however I pleased. And it worked absolutely _fine_.
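The template side was as simple as it sounds - roughly this (reconstructed from memory rather than lifted from the repo):
{% raw %}
```twig
<ul>
  {% for book in books %}
    <li>
      <a href="{{ book.goodreads }}">{{ book.title }}</a> by {{ book.author }}
      {% if book.status == "completed" %}(finished {{ book.completed_date }}){% endif %}
    </li>
  {% endfor %}
</ul>
```
{% endraw %}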
But it wasn't quite what I wanted: it didn't give any information about the books I'm reading, e.g. if I wanted to add book covers I'd have to upload them manually. So, last night, when I really should have been sleeping, I started working on using data files, combined with [using pagination to construct pages from data](https://www.11ty.dev/docs/pages-from-data/), to build something that was slightly closer to my original ambition.
## Getting information about a book
Choosing a data source was pretty easy. [Open Library](https://openlibrary.org) has a very simple route-based API that would allow me to do all the things I needed to: search for a book by author and title, get information about that book, and get the book covers.
I did all this in a data file inside the `_data` directory, called `readinglist.js`. I added my original list of books, and then for each book I use the API to first search for the book, and choose the first result, then I get more detailed information about the book using the `Works` API, and then finally I look for a cover if it exists. Here's all the code to do that:
```javascript
const axios = require('axios');

// `books` is the same hand-written array from the original JSON file, inlined here
const getBook = async book => {
const res = await axios.get(`https://openlibrary.org/search.json?author=${book.author}&title=${book.title}`);
const key = res.data.docs[0].key;
const work = (await axios.get(`https://openlibrary.org${key}`)).data;
const cover = work.covers ? `https://covers.openlibrary.org/b/id/${work.covers[0]}-M.jpg` : undefined;
return {...book, cover, "data": work };
};
module.exports = async function() {
return await Promise.all(books.map(getBook));
}
```
## Constructing pages from the data
**Update:** I removed this feature as it wasn't particularly valuable and was slowing the build down
Eleventy makes this incredibly easy. I just had to add some pagination rules to the front-matter data on a new template:
```yaml
---
layout: layout.njk
pagination:
data: readinglist
size: 1
alias: book
permalink: book/{{ book.title | slugify }}/index.html
eleventyComputed:
title: "{{ book.title }} - {{ book.author }}"
---
```
Eleventy took care of the rest, and generated all of the pages. I did find that I had to specify the `.html` extension for the permalinks. If I left it out, Eleventy wouldn't generate a valid HTML page, and navigating to it would download the file rather than displaying it in-browser.
## The result
You can see the results for yourself on the [reading list](/reading-list/), ~~or by viewing one of the information pages for a book, such as [Murder on the Orient Express](/book/murder-on-the-orient-express)~~. Overall, I'm actually really happy with how it turned out. It's a step closer to the web app that I'd originally envisioned, but it only took a few hours to put together.
There are a few limitations: I can't fine-tune what version of the book the API returns without more work, and I've got no control over the descriptions provided. But, I think it's a fair compromise to achieve what I wanted.
Overall, this was a fun little late-night project to pick up. As usual, I love how easy Eleventy makes tasks like this.

View File

@ -0,0 +1,17 @@
---
title: "Why I don&#8217;t write React anymore"
date: 2022-06-23T00:00:00
slug: why-i-dont-write-react-anymore
---
Earlier this year I made the decision to stop working in React. I'd just come out of a project using a modern JS tech stack that used Next.js, and I couldn't get over how _complex_ the whole thing had become, and how little I enjoyed the work.
It felt like every new feature and change had a _huge_ overhead. Creating something extremely common, like a modal dialog, became like reinventing the wheel each time: create the dialog component; oh, now we need a component for the content; and now we also need components for the controls (which are different each time); and now we need to handle the component lifecycle with hooks. Suddenly, without realising it, a dialog takes 4 different files and a whole plate of spaghetti code to get functional.
Every component felt like a chore. In short, it felt like for every feature I was trying to force the browser to do what I wanted it to - all of it to reproduce features that already exist in the browser natively, abstracted away so that I could do things like manage the user's state. Instead of progressively enhancing the user's experience to provide far-reaching compatibility, we're starting at the most-complex state and attempting to work backwards.
## What I use instead
For most projects, I've found you don't need a slow, JS-laden server-side generated web app with hydration and all of the overheads that come with it. If all I'm making is a static or infrequently-updated website, I'll use something like [Eleventy](https://11ty.dev) (which this website is built with). Otherwise, I'll generally opt for a "classic" multi-page application. A backend server that handles the business logic, and uses that to provide data to the frontend.
This means that I can build fast client-side experiences, without sending a huge javascript bundle. If I need some client-side interactivity, Web Components are standardised and at least [partially supported across all major browsers](https://caniuse.com/?search=web%20components). Likewise, if I need reusable components, templating languages like [Nunjucks](https://mozilla.github.io/nunjucks/) can provide that without adding too much complexity.
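As a flavour of how little code the native approach needs, here's a hypothetical bare-bones custom element - an illustration rather than code from a real project:
```javascript
// A minimal, dependency-free disclosure widget as a custom element
class ToggleSection extends HTMLElement {
  connectedCallback() {
    const button = document.createElement("button");
    button.textContent = this.getAttribute("label") ?? "Toggle";
    // Reflect open/closed state as an attribute, so it can be styled from CSS
    button.addEventListener("click", () => this.toggleAttribute("open"));
    this.prepend(button);
  }
}

customElements.define("toggle-section", ToggleSection);
```
No build step, no bundle - the browser provides the component model.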

View File

@ -0,0 +1,39 @@
---
title: "Building my first emulator"
date: 2022-07-20T00:00:00
slug: building-my-first-emulator
---
![Screenshot of the Chip8 emulator displaying a white IBM logo on a black screen](./src/images/chip8_ibm_logo.png)
Recently I've become interested in embedded devices and developing for them. Initially, that started out as wanting to write a game for the Game Boy Advance, but if I'm being honest my knowledge wasn't up to scratch. So instead, I thought I'd try my hand at developing a simple emulator. After researching for a bit (read: about 5 minutes), I settled on a Chip8 emulator because a few threads said it was a good place to start, and there were a fair few useful resources around developing one. The finished product can be seen [on Github](https://github.com/LewisDaleUK/chip-8).
## Starting out
To begin with, I settled on a language to use. It made sense to go low-level, because the end-goal is getting better at coding with strict device constraints. I considered Rust, but decided against it because there was a lot more syntax and new concepts to learn. C fit the bill better: I'd used it before, the syntax is simple, and it's such a ubiquitous language there are _tons_ of resources around - you're rarely treading new ground with a language like C.
I followed [this guide from Tobias V. Langhoff](https://tobiasvl.github.io/blog/write-a-chip-8-emulator/) to describe the desired functionality, and used [Cookerly's Chip8 implementation](https://github.com/cookerlyk/Chip8/) as a basis for when I got stuck - this was particularly useful when dealing with the SDL2 graphics library.
## Building the emulator
Getting started was the hardest part. After defining the basic structure of the emulator - allocating space for 4096 bytes of RAM, loading the font into memory, and adding a stack and registers - I created the SDL2 window, sized it appropriately, and worked on drawing out the display. Initially, I simply treated the display as a 64 x 32px grid and manually drew out each pixel - black if the bit was 0, white if it was 1. This worked fine, but was a bit slow, so I took some cues from Cookerly's code and used a texture instead. This works pretty neatly, and the code is a little bit cleaner.
Once all this was done, it was time to build the execution loop. It looks a bit like this:
* Fetch, decode, and execute the next instruction
* Decrement the two timers
* If updated, re-render the display
* Handle any user input
Handling the instructions was easily the most time-consuming part. There are roughly 33 instructions, which meant a big old switch statement, with some nested switches for good measure. None of them were particularly tough to implement, though. The rest of the code was fairly straightforward - I already had the display rendering code written, I just had to call it whenever the updated flag was set. SDL2 makes taking user input easy; I just had to map each key to the correct hexadecimal character.
## Testing
Along with a few basic ROMs to test that it actually played games, I found a [Chip8 emulator test ROM](https://github.com/corax89/chip8-test-rom) that checked various opcodes and output a pass/fail on the screen. This proved to be a fairly useful sense check (although definitely not a complete test, as I still found some bugs when running other ROMs).
![Screenshot of the Chip8 emulator showing the Test Opcode ROM output. It shows a black screen with 6 rows and 3 columns of white text, each with a shorthand version of the opcode and the text 'OK' The first entry refers to opcode 0x3X00, so states 3X OK, and so on](./src/images/chip8_test-1024x541.png)
## Next steps
I'm more-or-less done with this project, so next I'd like to move onto building an emulator for more complicated devices, such as a Game Boy. However, if I do come back to the project I'd like to do more work into improving things like the CPU clock speed - right now it feels a bit too fast on some devices, and too slow on others, so needs some fine-tuning. There are also some ROMs that don't work, which I suspect is due to some differences in implementation across the devices that ran Chip8.

View File

@ -0,0 +1,13 @@
---
title: "Finding my old blog"
date: 2022-08-01T00:00:00
slug: finding-my-old-blog
---
I've had a few blogs over the years - when I was younger I had a bad habit of starting a blog, and then occasionally throwing it away to move to a new platform. There's a handful of blog posts out there that I've written that are now lost to the internet.
By pure chance, however, I managed to find the source for one iteration of my blog, complete with 5 posts I made around mid-2014 - 2015. For posterity, I've added them to this blog, all under the [archive category](https://lewisdale.dev/blog/category/archive).
I loved that era of my personal development - I was still fairly new to tech (I think I would have been in my second or industrial year of university), I was learning a ton, and I just wanted to share it with everyone.
I've been making an effort to rekindle that passion I had for writing code, where I'd have an idea, build it in a day or two, and then just want to share it. A big part of that means overcoming the perfectionism and imposter syndrome that has held me back in the last couple of years. Hopefully, though, it should let me produce some interesting work!

View File

@ -0,0 +1,13 @@
---
title: "The web is exhausting"
date: 2022-08-31T00:00:00
slug: the-web-is-exhausting
---
I've been using the web in some form for over 20 years - granted, the early parts of that were heavily monitored because I was about 5 years old when we got dialup. But a large part of my formative years were spent online, and it was such a different place compared to how it is today.
I remember spending hours on different websites, which were mostly forums dedicated to a single topic, speaking with a variety of people (although the same few names were usually present). The web felt *huge* back then, a vast array of small communities. It feels like the total commercialisation of the web has taken that from us, though. I now visit maybe 3 websites regularly, and just endlessly, mindlessly doomscroll. I can honestly say that using the web these days is so much less exciting and fun compared to what it used to be.
It's not just become exhausting as a consumer, though. A lot of the modern tooling available to web developers is overwhelmingly complicated. This post came about because I considered building a small web app using WebGL and Javascript - I decided I wanted a bit of type safety, and to use one library (Three.js). Then I looked at the number of steps required just to get Typescript working nicely with ThreeJS, and gave up. It shouldn't be this hard to build web applications; I shouldn't have a development directory that regularly exceeds 1GB per project because there are thousands of dependencies.
It's not all doom and gloom, thankfully. There are tons of people making interesting, fun, and exciting content for the web - they're just harder to find these days. And there *are* simple tools for building web applications (this blog is [built using one](https://11ty.dev/)), and I don't *need* the libraries or Typescript to build apps; they're just nice to have.

View File

@ -0,0 +1,25 @@
---
title: "Diabetes (might have) saved my life"
date: 2022-09-27T00:00:00
slug: diabetes-might-have-saved-my-life
---
In April this year, I went to see the doctor and had some blood tests done as part of the checkups. When I spoke to the doctor about the results, he dropped a bombshell I wasn't-but-also-sort-of-was expecting: I was diabetic.
I had no symptoms, so I had no idea, but it still wasn't wholly unexpected. Over the previous few years my physical fitness had slid quite drastically, my diet was poor, and I didn't get a huge amount of exercise. The lockdowns in 2020 and early 2021 only exacerbated things.
A week later, I went to the diabetic clinic, and was prescribed some tablets to help control blood sugar levels, along with a warning about some of the fun side effects. I was also given some tips about diet and exercise, and sent on my way.
## Fixing things
This scared the hell out of me. I was young to get the diagnosis - 27 at the time - and it's the first time I'd ever really felt my own mortality. I have a daughter, and the thought of not being around for her was too much to contemplate.
So I started trying to improve things. I started small, just going for walks before work, at lunchtime, and then after work. I also restricted myself to a low-carb diet, which isn't something I'd recommend to anyone who isn't trying to control blood sugar. Did you know you can actually have withdrawal symptoms from stopping sugar? They lasted about 5 days and made me feel a bit dizzy and nauseous.
After a little while I started cycling to places - at first just to the supermarket and back, eventually discovering that I actually *really* enjoy cycling, and now its my preferred way to commute. I started to see results quickly: I lost weight, had more energy, and slept better (and apparently no longer snore).
## The aftermath
After about 3 months I went back to the doctors to have my blood sugars tested, and they were normal - something the clinician treating me said she's only seen 3 or 4 times in the 12 years she's been doing the job. I'm now on a path to remove medication entirely and (hopefully) reverse the diagnosis.
But more importantly, getting this diagnosis was the kick I needed to sort my physical health out. In total I've lost 27kg since diagnosis, and about 32kg from my peak weight (that's about 60 and 70lbs, respectively, if that's your thing). My blood pressure is lower, my heart rate is lower, I have way more energy, and I've gained a hobby I genuinely enjoy. I do need to buy a lot of new clothes, though.

View File

@ -0,0 +1,46 @@
---
title: "Working retro with a Raspberry Pi"
date: 2022-09-10T00:00:00
slug: working-retro-with-a-raspberry-pi
---
Since moving to a co-working space, I've started using a [Raspberry Pi 400](https://www.raspberrypi.com/products/raspberry-pi-400/) as my primary desktop PC at home, to avoid transporting my laptop back-and-forth each day. I'm mainly using it for writing blog posts, playing the odd game, and tinkering with some of my own projects.
After a few weeks of this, I've got some thoughts on the pros and cons of using it.
## The setup
- Raspberry Pi 400 board, overclocked to 2.2GHz
- Logitech G203 optical mouse
- AOC 1080p monitor, connected via micro HDMI cable
- 128GB storage via micro SD
- [Twister OS](https://twisteros.com/)
- [SpaceVim](https://spacevim.org/) as my IDE
- [Lynx](https://lynx.invisible-island.net/) and Chromium for web browsing
I went with Twister OS over Raspberry Pi OS for the simple reason that it has a few retro themes that I like. After a bit of tinkering, I settled on the Windows 95 theme.
![The Twister OS desktop, themed to look like Windows 95, except with modern applications](./src/images/screenshot_twister-1024x576.png)
Chromium is *fine* for browsing most of the time, but it's a resource hog, so sometimes I'll decide to just use Lynx instead - it's kind of refreshing to not be bombarded with images, ads, and Javascript.
For working, there is a build of VS Code available, but I found it to be pretty slow and a resource hog, much like Chromium, so I use the SpaceVim distribution of NeoVim, which has just enough features to allow me to work relatively easily.
## Pros
- It's pretty snappy, especially after overclocking
- I get a nice pang of nostalgia when I boot it up into the Windows 95 theme
- Browsing with Lynx is great for filtering out a lot of the fluff
- The computer itself runs off a 15W USB-C power supply, which is nothing to sniff at with current energy prices
## Cons
- Chromium is pretty slow. I find a lot of pages hanging when scrolling, even with hardware acceleration enabled
- SpaceVim adds a whole new layer of complexity to Vim, which I've not yet fully got to grips with
- It's a 1.8GHz ARM CPU, so it's pretty limited - especially coming from the M1 in my Macbook
- The Pi 400 keyboard isn't great - it's quite cramped and the keys need a surprising amount of force to push
- It takes a lot of time to configure all of the different options
Overall, though, it's not a bad working setup. Once it's set up, it seems to be quite stable. And as a bonus it's an exceptionally cheap system: the Pi 400 is £70, and comes embedded in a keyboard, meaning you only need to buy a mouse and a monitor. I'd recommend it as either a backup desktop, a first computer, or a budget system for someone looking to get into computing.

View File

@ -0,0 +1,223 @@
---
title: "BASIC Interpreter Part 3: Copying values"
date: 2023-01-20T16:13:56
slug: basic-interpreter-part-3-copying-values
---
Now time for part three of my Sinclair BASIC Interpreter. In [the previous post](https://lewisdale.dev/post/basic-interpreter-part-two-variables/) I added the ability to assign data to a variable using the `LET` command. Now, it's time to use those variables in expressions. That means:
- Assigning one variable to another's value (`LET a=b`)
- Performing basic mathematical expressions
- Assignment using those maths expressions (`LET c=a*b`)
## Assigning variables
First things first, I need to update the enum I have for storing variables. Right now, I use an enum called Primitive, which has either an `Int (i64)` or a `String` option. To begin with, I'm going to try adding a third branch to the logic, called `Assignment`, which will also store a String - in this case it'll be a variable name. I'll also add a test to demonstrate this, which naturally fails right now (yay TDD).
```rust
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum Primitive {
Int(i64),
String(String),
Assignment(String)
}
#[test]
fn it_assigns_one_variable_to_another() {
let line = "10 LET a=b";
let (_, result) = parse_line(line).unwrap();
let expected: Line = (
10,
Command::Var((
            String::from("a"),
            Primitive::Assignment(String::from("b"))
))
);
assert_eq!(result, expected);
}
```
So as a first pass, I just want to assume that everything else is correct in the line, and whatever is on the other side is a variable name. So, with a small amount of validation (that the first character isn't a digit), I'm just using `take_until1`, separated by the equals sign, to collect everything as a String:
```rust
fn parse_assignment(i: &str) -> IResult<&str, (String, Primitive)> {
let (i, _) = not(digit1)(i)?;
    let (i, id) = take_until1("=")(i)?;
    let (i, _) = tag("=")(i)?;
Ok((i, (id.to_string(), Primitive::Assignment(i.to_string()))))
}
fn parse_var(i: &str) -> IResult<&str, (String, Primitive)> {
alt((parse_int, parse_str, parse_assignment))(i)
}
```
This is extremely permissive in its current form, so it needs to go at the very end of the `alt` combinator. But it works - well, it passes the one test. However, when I run my entire test suite I find it causes a regression: the parser should not accept string variables with multi-character names, but this parser is permissive enough that it does.
So, the next thing to do is to properly validate the variable name.
```rust
// Take everything until it hits a newline, if it does
fn consume_line(i: &str) -> IResult<&str, &str> {
take_while(|c| c != '\n')(i)
}
fn parse_str_variable_name(i: &str) -> IResult<&str, String> {
let (i, id) = terminated(
verify(anychar, |c| c.is_alphabetic()),
        tag("$")
)(i)?;
    let id = format!("{}$", id);
Ok((i, id))
}
fn parse_int_variable_name(i: &str) -> IResult<&str, String> {
map(
preceded(not(digit1), alphanumeric1),
String::from
)(i)
}
fn parse_assignment(i: &str) -> IResult<&str, (String, Primitive)> {
let (i, id) = alt((
parse_str_variable_name,
parse_int_variable_name
))(i)?;
    let (i, _) = tag("=")(i)?;
let (i, assigned_variable) = consume_line(i)?;
Ok((i, (id.to_string(), Primitive::Assignment(assigned_variable.to_string()))))
}
```
And that's worked (for what I want, anyway):
`test result: ok. 14 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s`
But if I execute the simple program from the last post, and dump out the stored variables, I see:
```json
{"apple": Int(10), "b$": String("Hello"), "cat": Assignment("apple")}
```
Which isn't quite right, because we should be assigning by value, not by reference (which is effectively what's happening there). So I amend the execution loop to add a specific case for Assignment:
```rust
match item.1 {
    Command::Print(line) => println!("{}", line),
Command::GoTo(line) => iter.jump_to_line(line),
Command::Var((id, Primitive::Assignment(variable))) => {
self.vars.insert(id, self.vars.get(&variable).unwrap().clone());
}
Command::Var((id, var)) => {
self.vars.insert(id, var);
}
    _ => panic!("Unrecognised command"),
}
```
So now when I encounter an Assignment, I look up the actual value of the variable I'm assigning from, and insert that as its own value. Now, the output looks like:
```json
{"b$": String("Hello"), "apple": Int(10), "cat": Int(10)}
```
## Printing
Okay, now I know how to read a variable, and assign a variable, I should now be able to print one out too. I'm going to add yet-another-enum, to represent a print output, which can either be a Value, or a Variable:
```rust
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum PrintOutput {
Value(String),
Variable(String)
}
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum Command {
Print(PrintOutput),
GoTo(usize),
Var((String, Primitive)),
None,
}
```
And then I updated my parser for Print to read either a string or a variable name:
```rust
fn parse_print_command(i: &str) -> IResult<&str, PrintOutput> {
alt((
map(alt((
parse_str_variable_name,
parse_int_variable_name
)), PrintOutput::Variable),
map(read_string, PrintOutput::Value)
))(i)
}
let (i, cmd) = match command {
    "PRINT" => map(parse_print_command, Command::Print)(i)?,
...
}
```
And then update the execution loop to use either of these new branches:
```rust
match item.1 {
    Command::Print(PrintOutput::Value(line)) => println!("{}", line),
    Command::Print(PrintOutput::Variable(variable)) => println!("{:?}", self.vars.get(&variable).unwrap()),
...
}
```
Now to test it with a new BASIC program:
```basic
10 LET a$="Hello"
20 LET b$="World"
30 PRINT a$
40 PRINT b$
```
```command-line
╰─$ cargo run
Finished dev [unoptimized + debuginfo] target(s) in 0.00s
Running `target/debug/basic-interpreter`
String("Hello")
String("World")
```
## Quick addition: comments
And quickly, just because it'll be relatively simple, I'm going to also parse comments, which in BASIC are marked as `REM`:
```rust
fn match_command(i: &str) -> IResult<&str, &str> {
    alt((tag("PRINT"), tag("GO TO"), tag("LET"), tag("REM")))(i)
}
fn parse_command(i: &str) -> IResult<&str, Command> {
...
let (i, cmd) = match command {
...
        "REM" => {
            let (i, _) = consume_line(i)?;
(i, Command::Comment)
},
};
...
}
```
That's all I'll add to this part for now. But things are starting to come together! It won't be long before this can run the very-basic example program from [chapter 2 of the reference manual](https://worldofspectrum.org/ZXBasicManual/zxmanchap2.html):
```basic
10 REM temperature conversion
20 PRINT "deg F","deg C"
30 PRINT
40 INPUT "Enter deg F",F
50 PRINT F,(F-32)*5/9
60 GO TO 40
```
As always, the source code is on [Github](https://github.com/lewisdaleuk/basic-interpreter) (although it's in dire need of some cleanup).

View File

@ -0,0 +1,156 @@
---
title: "BASIC Interpreter Part Two: Variables"
date: 2023-01-13T14:02:11
slug: basic-interpreter-part-two-variables
---
[Part One](https://lewisdale.dev/post/creating-a-sinclair-basic-interpreter/)
It's been a bit longer than I would have liked between the first post and this one, but life has a habit of getting in the way.
In my last post, I created a skeleton interpreter that could read very basic programs: we could `PRINT`, and use `GO TO` to jump to a different line in the program.
## Variables
That's not much of a program at all - programs have *data*, so the next logical step is adding support for variables.
Variables in BASIC have essentially three types:
- Number
- String
- Array
`Numbers` and strings are defined with the `LET` keyword, while arrays are denoted by `DIM`. There are also restrictions on variable naming. A number variable name can have any length, and can contain (but not begin with) a number. In this example, lines 10, 20, and 30 are all valid, however line 40 is not:
```basic
10 LET a=10
20 LET apples=5
30 LET b1234=10
40 LET 123=50
```
Strings are limited to single alphabetical character variable names, terminated by `$`:
```basic
10 LET a$="apples"
```
For the sake of simplicity and brevity, we're not going to implement `Arrays` in this section - they'll come later. For now, I just want to focus on allowing us to read a variable, but not perform any operations on it.
To start with, we need to define an `Enum` to hold our variable data:
```rust
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum Primitive {
Int(i64),
String(String),
}
```
I'm defining numbers as signed 64-bit integers, which is overkill when the original system only had 16-bit registers (despite being an 8-bit machine). This will *probably* lead to weird behaviour for programs that rely on hitting the ceiling for integers, so I'll go back and change it at some point. But for now, this is fine.
Next, we need to update the `Command` enum to accept variables, which I'm storing as a tuple of (`Variable name`, `Variable value`).
```rust
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum Command {
Print(String),
GoTo(usize),
Var((String, Primitive)),
None,
}
```
First up, let's parse strings, because we've done that already for `Print` and, in theory, it should be simple.
```rust
use nom::combinator::{map, verify};
fn parse_str(i: &str) -> IResult<&str, (String, Primitive)> {
let (i, id) = verify(anychar, |c| c.is_alphabetic())(i)?;
    let (i, _) = tag("$")(i)?;
    let (i, _) = tag("=")(i)?;
    let (i, var) = map(read_string, Primitive::String)(i)?;
    let var_name = format!("{}$", id);
Ok((i, (var_name, var)))
}
```
So, first of all we verify that the variable name conforms to the standard - we read a single `char`, and then use `verify` to confirm that it's alphabetic. The next two characters are fixed, `$` and `=`, and then finally we re-use `read_string` from our `Print` parser, and map it to our `Primitive::String` enum value. Then we just return the variable name and the value as a tuple.
Next, we want to parse numbers:
```rust
use nom::{
character::complete::{i64 as cci64, alphanumeric1, digit1},
combinator::{map, not}
};
fn parse_int(i: &str) -> IResult<&str, (String, Primitive)> {
let (i, _) = not(digit1)(i)?;
let (i, id) = alphanumeric1(i)?;
    let (i, _) = tag("=")(i)?;
let (i, var) = map(cci64, Primitive::Int)(i)?;
Ok((i, (id.to_string(), var)))
}
```
Similar to parsing strings, we first check that the first character of the variable is not a digit, using the `not` parser. Then we read one-or-more alphanumeric characters, check the assignment operator is there, and then read and map a 64-bit signed integer to our `Primitive::Int`.
Finally, combine the two into a single parser using `alt`:
```rust
fn parse_var(i: &str) -> IResult<&str, (String, Primitive)> {
alt((
parse_int,
parse_str
))(i)
}
```
For the final step, we need to update our `Program` struct to store and handle variables. I'm lazy, so I'm going to use `HashMap` and not do any real checks before inserting:
```rust
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct Program {
nodes: Node,
current: Node,
vars: HashMap<String, Primitive>
}

impl Program {
    pub fn execute(&mut self) {
let mut iter = self.clone();
while let Some(node) = iter.next() {
if let Node::Link { item, next: _ } = node {
match item.1 {
    Command::Print(line) => println!("{}", line),
Command::GoTo(line) => iter.jump_to_line(line),
Command::Var((id, var)) => {
self.vars.insert(id, var);
},
    _ => panic!("Unrecognised command"),
}
};
}
}
}
```
And that's it! We've not added any additional output, but putting a `println` at the end of `execute` does show variables being assigned:
```basic
10 PRINT "Hello world"
20 LET apples=5
30 LET b$="Hello"
```
```command-line
Finished dev [unoptimized + debuginfo] target(s) in 0.27s
Running `target/debug/basic-interpreter`
Hello World
{"apple": Int(10), "b$": String("Hello")}
```
Next up, we'll update `PRINT` so that it can print out a variable, and maybe perform simple operations on variables.

View File

@ -0,0 +1,19 @@
---
title: "Buy-it-for-life"
date: 2023-01-26T10:04:30
slug: buy-it-for-life
---
As a general rule, I buy something that's good enough for what I need it for. Partly this is because most of the time I haven't been able to buy the top-of-the-line product, and partly because it's usually in aid of a hobby that I'll [forget about in a week's time](https://lewisdale.dev/post/scratching-an-itch/).
But occasionally I'll let myself splurge on something that's better quality (not necessarily more expensive), because I think it'll be worth it. Sometimes it isn't, but a few things I own have proven to be.
For starters, my headphones. I'm not much of an audiophile, but there was a period of time where I was taking flights very frequently, and I'm absolutely terrified of flying. So, to combat it, I bought a pair of Sony WH-1000XM3 headphones after trying the noise cancellation in Berlin airport. Honestly, they've been fantastic. It's been over 4 years, and they still work perfectly, hold a good charge, and the noise cancellation is great. The ear cushions are starting to show a bit of wear, but they look simple to change, so I'll probably just replace them rather than buy new headphones.
Secondly is my watch - I do like to wear watches, and usually opt for Casio digitals. But my parents bought me a Citizen Eco Drive watch for my 13th birthday, and it still works today, some 15-or-so years later. It's even solar-powered, so I've never even had to change a battery. If I don't wear it for a while I just need to stick it on the windowsill for a day or two and it'll start ticking again.
![A silver citizen eco-drive watch on a wrist.](./src/images/IMG_1210-1-300x225.jpeg)
Finally, I bought a pair of decent leather boots a few years ago. They weren't ludicrously priced (even by my standards, and I'm notorious for buying the cheapest shoes possible), but they were more than I'd normally spend. But they've kept really well, the leather is in good condition - although in need of a good polish.
I'm aware that I'm extremely privileged to be able to own these things in the first place - they're luxuries, not necessities. I really hope this post doesn't come off like I'm bragging about having nice things - someone give me a clip round the ear if I sound like a pompous arse.

View File

@ -0,0 +1,405 @@
---
title: "Creating a Sinclair BASIC interpreter"
date: 2023-01-03T17:24:00
slug: creating-a-sinclair-basic-interpreter
---
Given my new website design, I figured I'd also have a go at making an interpreter for Sinclair BASIC so that I can run my own ZX Spectrum programs. I've got a few aims:
* Create something that can properly parse Sinclair BASIC
* Run the interpreted software in the browser
* Allow user input
What this isn't supposed to be:
* A perfectly-performant implementation
* An emulator
I'm not a Rust developer - I'm only learning - so everything I write here will be suboptimal. If anyone reading wants to give me some pointers (pun intended), I'd be forever grateful.
I'm going to use the [ZX Basic Instruction Manual](https://worldofspectrum.org/ZXBasicManual/) as my main reference for this project.
Source code for the project is available on [Github](https://github.com/lewisdaleuk/basic-interpreter)
## Getting started
I'm using Rust for this, so I create a new Cargo project:
```bash
cargo new basic-interpreter
```
And I know I'm going to need to parse input, so I'm going to use [nom](https://docs.rs/nom/latest/nom/) for parsing:
```bash
cargo add nom
```
## Hello, World
Okay, so to begin with we're going to implement the simplest program we can: Hello World. It'll be a single line program that just prints out the string. Here's the program in question:
```basic
10 PRINT "Hello, World"
```
There are three parts to this statement:
1. The line number - this is in theory optional, but we'll handle that later
2. The command, in this case `PRINT`
3. The input. There are some different variations of input, but for now we're just going to handle single strings
### Parsing
Okay so let's get started with our parser! We'll start by writing a test for a line to make sure it parses okay:
```rust
#[test]
fn it_parses_a_print_command() {
let input = "10 PRINT \"Hello, world\"";
let expected = (10, super::Command::Print(String::from("Hello, world")));
let (_, result) = super::parse_line(input).unwrap();
assert_eq!(expected, result);
}
```
And let's create our types:
```rust
pub type Line = (u32, Command);
#[derive(Debug, PartialEq, Eq)]
pub enum Command {
Print(String),
None
}
```
To start with, we'll extract the line number:
```rust
pub fn parse_line(line: &str) -> IResult<&str, Line> {
let (i, line_number) = terminated(ccu32, tag(" "))(line)?;
    Ok((i, (line_number, Command::None)))
}
```
Then we need to parse the command:
```rust
fn read_string(i: &str) -> IResult<&str, &str> {
take_until("\"")(i)
}
fn parse_command(i: &str) -> IResult<&str, Command> {
let (i, (command, _)) = tuple((take_until(" "), tag(" ")))(i)?;
let (i, cmd) = match command {
"PRINT" => map(delimited(tag("\""), read_string, tag("\"")), Command::Print)(i)?,
_ => (i , Command::None)
};
Ok((i, cmd))
}
pub fn parse_line(line: &str) -> IResult<&str, Line> {
let (i, line_number) = terminated(ccu32, tag(" "))(line)?;
let (i, command) = parse_command(i)?;
Ok((i, (line_number, command)))
}
```
Finally, let's write some code to quickly run our program:
```rust
use std::fs;
mod basic;
fn main() {
let file = fs::read_to_string("./src/inputs/hello_world.bas").unwrap();
let lines = file.lines().next().unwrap();
let (_, (_, command)) = basic::parse_line(lines).unwrap();
match command {
basic::Command::Print(input) => {
println!("{}", input);
}
_ => {
panic!("Command not recognised");
}
};
}
```
And we can run it:
```bash
$ cargo run
Compiling basic-interpreter v0.1.0 (/Users/lewis/development/personal/basic-interpreter)
Finished dev [unoptimized + debuginfo] target(s) in 0.51s
Running `target/debug/basic-interpreter`
Hello, world
```
Cool, that works!
### Escaped characters
Okay, but what about if I change my program to print quote characters (`"`)?. To do this, we need to escape the strings:
```basic
10 PRINT "Hello, \"World\""
```
Which we would expect to result in:
```bash
Hello, "World"
```
However because we're using `take_until`, our parser stops at the first escaped quote, resulting in:
```bash
Hello, \
```
To fix this, we need to use the `escaped_transform` parser:
```rust
fn read_string(i: &str) -> IResult<&str, &str> {
delimited(
tag("\""),
escaped_transform(
none_of("\\\""),
            '\\',
alt((value("\\", tag("\\")), value("\"", tag("\"")))),
),
tag("\""),
)(i)
}
```
What we're saying here is accept any character that doesn't match either `\` or `"` (`none_of("\\\"")`), where `\` is our escape character. Finally, we match escaped quote characters and escaped backslashes and un-escape them so that they print properly (otherwise our output will include escape characters when printed).
## Basic looping
Alright, next is everybody's favourite command: `GO TO`, which lets us jump to a different part of the program. Here's a short program using our two commands that will print "Hello World" infinitely:
```basic
10 PRINT "Hello World"
20 GO TO 10
```
### Parsing
The first thing that leaps out to me from this command is that `GO TO` contains a space. That won't work with our current parser, which reads a string until it meets a space. Instead, we should try and be more specific:
```rust
#[derive(Debug, PartialEq, Eq)]
pub enum Command {
Print(String),
GoTo(usize),
None,
}
fn match_command(i: &str) -> IResult<&str, &str> {
alt((
tag("PRINT"),
tag("GO TO")
))(i)
}
fn parse_command(i: &str) -> IResult<&str, Command> {
let (i, command): (&str, &str) = match_command(i).unwrap_or((i, ""));
println!("{}", command);
let (i, _) = tag(" ")(i)?;
let (i, cmd) = match command {
"PRINT" => map(read_string, Command::Print)(i)?,
"GO TO" => map(ccu64, |line| Command::GoTo(line as usize))(i)?,
_ => (i, Command::None),
};
Ok((i, cmd))
}
```
### Building a program
For `GO TO` to function, we need a structure to actually store our program. We need to:
* Store a command as a line
* Easily move to the next line
* Search a program for a line by number
(A real compiler would do lots of clever things here that would enable it to drop unreachable code and optimise things, but that's not what we're here for).
We... might need a Linked List. They're pretty notoriously a headache in Rust due to ownership rules - but we can use `Box` to help mitigate this:
```rust
pub enum Node {
None,
Link { item: Line, next: Box<Node> }
}
```
We'll need to add the `Clone` trait to our `Command` enum for this to work (`Copy` isn't an option, because `Print` holds a `String`):
```rust
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum Command {
Print(String),
GoTo(usize),
None,
}
```
And then implement a (very basic) Linked List:
```rust
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum Node {
None,
Link { item: Line, next: Box<Node> }
}
impl Node {
    fn push(&mut self, val: Line) {
        *self = match self {
            Self::Link { item, next } => {
                // Recurse to the tail, then rebuild this link. Cloning the
                // rest of the list on every push is wasteful, but it keeps
                // the borrow checker happy for a first pass.
                next.push(val);
                Self::Link { item: item.clone(), next: next.clone() }
            },
            Self::None => Self::Link { item: val, next: Box::new(Self::None) }
        }
    }
}
```
We also want to be able to find a line, so we'll write a simple `find_line` function too:
```rust
fn find_line(&self, line: usize) -> Option<Node> {
if let Self::Link { item, next } = self {
if item.0 == line {
Some(self.clone())
} else {
next.find_line(line)
}
} else {
None
}
}
```
Finally, build a parser to read every line and store it in a list of nodes:
```rust
pub fn read_program(i: &str) -> IResult<&str, Node> {
let (i, lines) = separated_list0(tag("\n"), parse_line)(i)?;
let mut node = Node::None;
for line in lines.iter() {
node.push(line.clone());
}
Ok((i, node))
}
```
### Running the program
We have a list of instructions. Now we need to marshal them and provide an interface to run them. To do this, I've created a `Program` struct that holds a reference to the complete program, and a cursor pointing at the instruction currently being executed:
```rust
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct Program {
nodes: Node,
current: Node,
}
```
I'm also going to implement `Iterator` for the struct, so that we can easily loop over all of the instructions:
```rust
impl Iterator for Program {
type Item = Node;
fn next(&mut self) -> Option<Self::Item> {
let curr = self.current.clone();
match &self.current {
Node::Link { item: _, next } => {
self.current = *next.clone();
Some(curr)
}
Node::None => None,
}
}
}
```
Then we add an `execute` function, as well as a function to jump to a line:
```rust
impl Program {
pub fn new(node: Node) -> Self {
Program {
nodes: node.clone(),
current: node,
}
}
pub fn to_line(&mut self, line: usize) {
if let Some(node) = self.nodes.find_line(line) {
self.current = node;
} else {
panic!("Cannot jump to line {}, it does not exist", line);
}
}
pub fn execute(&mut self) {
let mut iter = self.clone();
while let Some(node) = iter.next() {
match node {
Node::Link { item, .. } => {
match item.1 {
Command::Print(line) => println!("{}", line),
Command::GoTo(line) => iter.to_line(line),
_ => panic!("Unrecognised command")
}
},
_ => ()
};
}
}
}
```
Now, we can run a new sample program, using the provided struct:
```rust
fn main() {
let file = fs::read_to_string("./inputs/simple_program.bas").unwrap();
    let (_, node) = basic::read_program(&file).unwrap();
    let mut program = basic::Program::new(node);
    program.execute();
}
```
```bash
$ cargo run
Finished dev [unoptimized + debuginfo] target(s) in 0.00s
Running `target/debug/basic-interpreter`
Hello World
Hello World
Hello World
Hello World
Hello World
(truncated ∞ lines)
```
I think that's where I'll leave this (surprisingly long) post. This was loads of fun, to be honest. I think for my next post I'll be adding some logic statements (`IF`) - the aim is to get this as close to a functioning piece of software as quickly as possible, and then work on some of the more fun commands.
I'm also going to refactor things a bit, because my `basic.rs` file got quite messy towards the end.
View File
@ -0,0 +1,19 @@
---
title: "Finding my old hard drive"
date: 2023-01-14T20:00:32
slug: finding-my-old-hard-drive
---
I was digging through some old belongings when I pulled out the laptop that I had during my 4th and final years at uni (I did an integrated MEng, so my degree took 5 years including a year in industry). I got pretty excited because I hoped that it would have the source code for my dissertation on it.
I lost the source because I lost access to the VPS that hosted my private Git server, and I didn't maintain any backups. It was dumb, but I was young and naïve.
I knew that the hard drive would have some form of Linux on it, because that's what I used all through university (various incarnations of Ubuntu or Arch, with i3 for window management). So I tried sticking it in my desktop to see if I could boot it, but no dice - possibly a lack of drivers, or it could be the weird power-management problems that my desktop has.
Next up, I thought I might be able to read the disk from either my Windows desktop, or my Mac laptop. I stuck it in a spare USB caddy, and hoped for the best. Still no joy: the disk is ext4-formatted, which is unreadable on both systems unless I use a virtual machine.
Shit, was I going to have to install Ubuntu on my desktop? I've no aversion to it, but that desktop is theoretically for the 30-40 minutes a month I have the time to play video games. Then I remembered: on my desk is my trusty Raspberry Pi 400. It took a bit of fiddling with dodgy USB connections, but eventually I got it to mount.
A quick search through the directories suggested I must have wiped it not long before I stopped using it, because there were only two or three projects. That meant, unfortunately, no dissertation. But I did find something else: Janet!
Janet was the name I gave to an IRC bot I started writing that used NLP to parse commands - sort of like a Discord bot, but a bit weirder, and it didn't quite work. I have [uploaded the source to Github](https://github.com/LewisDaleUK/janet); it's surprisingly well-written given its purpose and age, and I might even pick it back up if I get the urge to.
View File
@ -0,0 +1,54 @@
---
title: "Import posts from an RSS feed into WordPress"
date: 2023-01-07T22:15:06
slug: import-posts-from-an-rss-feed-into-wordpress
---
I decided to migrate my blog to Wordpress, for the simple reason that I was finding updating Markdown files manually a headache, and I want to be able to write on devices that aren't my laptop.
But first I had to move my content over - so I used the Wordpress REST API to copy my content from my RSS feed.
## Create an application password
In your Wordpress admin panel, go to `Users -> Profile`. Scroll down to the section that says "Application passwords", generate a new one, copy and store it somewhere.
## Consuming the feed and populating Wordpress
I used [Deno](https://deno.land/) to do this, but the code would be pretty similar for Node:
```javascript
import { parse } from "https://deno.land/x/xml@2.0.4/mod.ts";
// Deno reads env vars via Deno.env.get (process.env is Node-only)
const root = `${Deno.env.get("WP_SITE_URL")}/wp-json/wp/v2/posts`;
const password = Deno.env.get("WP_APP_PASSWORD"); // This is your application password
const user = Deno.env.get("WP_USER"); // This is the username of the user the application password belongs to
const auth = btoa(`${user}:${password}`);
const response = await fetch(Deno.env.get("SOURCE_RSS_FEED"));
const feed = await response.text();
const document = parse(feed);
for (const entry of document.feed.entry) {
const body = {
title: entry.title,
content: entry.content["#text"],
status: 'publish',
date: entry.published,
};
const res = await fetch(root, {
method: "POST",
body: JSON.stringify(body),
headers: {
"Authorization": `Basic ${auth}`,
"Content-Type": "application/json",
}
});
if (res.status >= 400) {
console.error(`Request failed with status ${res.status}: ${res.statusText}`);
console.error(await res.text());
}
}
```
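One thing worth noting: Deno is permission-based, so (assuming the script is saved as `import.ts` - the filename here is hypothetical) it needs network and env access explicitly granted:
```bash
deno run --allow-net --allow-env import.ts
```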
View File
@ -0,0 +1,17 @@
---
title: "Migrating to WordPress"
date: 2023-01-08T10:53:07
slug: migrating-to-wordpress
---
After messing around for the better part of a week with custom builds of Netlify CMS and varying levels of complexity, I bit the bullet and just migrated my blog to Wordpress. My original intention was for it to be quick and easy, to give me way more control over posting and make things easier.
That's rarely the case, and I ran into a few issues with clashing plugins, and of course I had to migrate my theme to make it work with Wordpress. But *hopefully* now I won't have as many issues.
I've been meaning to re-learn how Wordpress works anyway, so this was as good a time as any. And it means I get to put together a nice little utility plugin that makes my life easier. Here are the plugins I'm using:
- [IndieWeb](https://wordpress.org/plugins/indieweb/) for interacting with other websites
- [Perfect Images](https://wordpress.org/plugins/wp-retina-2x/) for displaying scaled images
- [WP Super Cache](https://wordpress.org/plugins/wp-super-cache/) to cache my pages and (hopefully) improve performance
I'd also like to find a way to serve WebP images instead of jpegs where possible - Jetpack can do this via its CDN, but it doesn't play nicely with Perfect Images, so I might have to wrangle something myself.
View File
@ -0,0 +1,15 @@
---
title: "New year, new blog design"
date: 2023-01-02T09:00:00
slug: new-year-new-blog-design
---
It's time for a new website layout. My previous one sort of evolved over a year of messing around and not quite finding something I was happy with. I didn't really feel like it represented *me*.
I've always been a fan of ZX Spectrums, despite them coming a bit before my time. I picked up a +3A in a charity shop once, and it's one of my favourite things to occasionally boot up.
So what better than to model my site on that classic design? I got to do some nice CSS gradients for the rainbow bar, and took the opportunity to strip some of the fluff out of my site.
![Screenshot of the website home page. Shows the title "LewisDale.dev" and a menu with Home, Blog, and Microblog options.](./src/images/screenshot-2023-01-02-at-02.09.12-1024x606.png)
I've reduced the website to two components for now: the blog, and the microblog. There'll be some new fluff added in no time, I'm sure, but it was getting a bit much to manage.
View File
@ -0,0 +1,11 @@
---
title: "Note-taking: 1 week in"
date: 2023-01-17T17:04:46
slug: note-taking-1-week-in
---
A week ago I wrote about wanting to [get better at note-taking](https://lewisdale.dev/post/note-taking-and-retaining-information/). My goal was to try and use Obsidian.md to take notes.
Well, a week in and it's gone surprisingly well. I set up a template for my daily note with a todo list, a list of meetings, and general notes, and I've configured Obsidian to open a new daily note at the start of each day. This means I don't need to create notes manually, which was adding mental overhead.
A week in and I've taken notes every single day! They're fairly short, but they're only for me, and it means I've actually got a log of what I was thinking through the day.
View File
@ -0,0 +1,13 @@
---
title: "Note-taking and retaining information"
date: 2023-01-10T09:25:47
slug: note-taking-and-retaining-information
---
One of the things I struggle with the most is note-taking: I can't do it during meetings because then I'm distracted by the writing and not, you know, participating. So I end up not doing it at all, and then I have absolutely no record of what was said and often forget things.
I'm going to _try_ to improve this, hopefully with a decent set up that works for me. I quite like writing in Markdown, purely because I know how to be productive with it (unless I need to add a link, in which case I will get the square braces and parentheses the wrong way round 100% of the time). So my plan is to use [Obsidian](https://obsidian.md) to take my notes, mostly because I've seen people write good things about it, and at the end of the day it's a text editor pointed at a directory, so it's easy to set up.
I'm not going to take notes during meetings, but I will set aside 5 minutes after each meeting to write up my thoughts while they're fresh. I'm also going to try and write down anything I learn while working on a project/with a service, in the hope that I'll be able to share it with others and that it'll be useful.
Plus, because I'm producing Markdown files, if I decide I want to scratch an itch I could build myself a little Wiki site using Eleventy. Because I'm nothing if not great at overcomplicating my personal tech stack.
View File
@ -0,0 +1,11 @@
---
title: "Scratching an itch"
date: 2023-01-20T08:16:44
slug: scratching-an-itch
---
For the last two weeks or so I've had the urge to buy an older PC, just for the purposes of messing about with the hardware and putting an overly complex Linux config on it. I know that if I buy it, I'll do that for a couple of hours and then never touch it again, but I still want to do it.
It's been like that my entire life, and it's an expensive habit. I've counted out about 30 different hobbies I've tried - and usually spent money on - without continuing them for any length of time.
It's very frustrating, but I need something to scratch the itch, preferably without dropping money on expensive, obsolete hardware.
View File
@ -0,0 +1,11 @@
---
title: "TIL: Adding a subreddit RSS feed"
date: 2023-01-15T10:49:23
slug: til-adding-a-subreddit-rss-feed
tags: [til]
---
I like to read some story-based Subreddits, like [/r/TalesFromTechSupport](https://reddit.com/r/talesfromtechsupport), but also don't like having to go to the Reddit app (well, Apollo) specifically to read these stories because I end up missing them.
As it turns out, Reddit does publish RSS feeds for Subreddits, at `https://www.reddit.com/r/<subreddit>/new/.rss?sort=new`.
I've added this to my RSS reader and new posts now show up in my feeds. It works best with relatively low-volume Subreddits - a popular one would be a bit overwhelming for me.
View File
@ -0,0 +1,42 @@
---
title: "TIL: Adding text borders with CSS"
date: 2023-01-12T19:25:42
slug: til-adding-text-borders-with-css
tags: [til]
---
After checking my website on mobile, I realised that I'd made a mistake, and included a pretty bad colour contrast issue on the page:
![Screenshot](./src/images/Screenshot-2023-01-12-at-09.34.55.png)
So I took to Google, and discovered that I could use the `-webkit-text-stroke` CSS property ([MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/CSS/-webkit-text-stroke)), which will add a border to the characters. [It's well-supported](https://caniuse.com/?search=-webkit-text-stroke), despite using the `-webkit` prefix:
```css
.text {
color: white;
-webkit-text-stroke: 1px black;
}
```
This worked, but because the stroke goes on the inside of the text, rather than the outside, the result is much harder to read and doesn't look great.
![Screenshot](./src/images/Screenshot-2023-01-12-at-09.41.39.png)
So I took to Google again, and found a [StackOverflow answer](https://stackoverflow.com/a/47511171) that gave me a really useful snippet, using `text-shadow`:
```css
.text {
color: white;
text-shadow: -2px -2px 0 #000,
0 -2px 0 #000,
2px -2px 0 #000,
2px 0 0 #000,
2px 2px 0 #000,
0 2px 0 #000,
-2px 2px 0 #000,
-2px 0 0 #000;
}
```
This is a much better result - it produces a nice 2px border around the text, which makes it easier to read against bright backgrounds, but doesn't impact the legibility of the individual characters.
![Screenshot](./src/images/Screenshot-2023-01-12-at-19.23.00-1024x147.png)
View File
@ -0,0 +1,46 @@
---
title: "TIL: Forwarding ports using SSH and proxying with Apache"
date: 2023-01-16T08:33:18
slug: til-forwarding-ports-using-ssh-and-proxying-with-apache
tags: [til]
---
I have two servers: a VPS, and a small server that I primarily use on my home network. The server at home is quite a bit more powerful than my VPS, and significantly cheaper to run, so I'd like to start moving some of my hosted services to it. The problem is that I don't have a static IP, so I'd need to get dynamic DNS setup and open the right ports on my router, which is a bit tedious (and not officially supported by my ISP).
My interim solution is to use port-forwarding via SSH, wherein I can specify a target host and port, and map it to a local port, so that requests on the target machine that go to `http://localhost:<port>` will instead be directed to the machine running SSH ([relevant guide on Ubuntu documentation](https://help.ubuntu.com/community/SSH/OpenSSH/PortForwarding)). The basic command looks like this:
```bash
ssh -R 8080:localhost:3000 user@my-remote-server
```
Which is useful, but it also just opens a regular SSH connection, which then has to be kept alive - I could use `screen` or `tmux`, but that's still not ideal: if the pipe gets broken, I'll have to reconnect it manually.
A tool called [autossh](https://github.com/Autossh/autossh) can be used to run the SSH connection in the background, and will automatically spawn a new process if the old one fails:
```bash
autossh -M <echo port> -f -N -T -R 8080:localhost:3000 user@my-remote-server
```
`<echo port>` is just an open port that autossh uses to monitor whether the connection is still alive. `-f` runs the command in the background, so I don't need to keep a terminal open, and `-N` and `-T` mean no command executed, and no tty allocated, respectively. Note that the `-R` from the plain ssh command is still needed - everything after the autossh-specific flags gets passed straight through to ssh.
So now the port has been forwarded, but it's not yet accessible to the outside world: unless the relevant ssh config setting is enabled on the remote machine (`GatewayPorts yes`), and the port is open in the server firewall, it will only be reachable on `localhost:8080`. So to use this, we also need to proxy requests.
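For reference, that setting lives in the server's sshd config - a minimal sketch, assuming the default OpenSSH layout (and followed by restarting `sshd` to pick it up):
```txt
# /etc/ssh/sshd_config on the remote server
GatewayPorts yes
```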
I'm using Apache on my server, because that's what the Wordpress auto-installer set up for me and I'm lazy, but you could also use Nginx:
```apacheconf
<VirtualHost my.subdomain:80>
ServerAdmin you@yourtld
ServerName my.subdomain
<Proxy *>
Order allow,deny
Allow from all
</Proxy>
# http://httpd.apache.org/docs/2.0/mod/core.html#limitrequestbody
LimitRequestBody 0
# Pass requests on
ProxyPass / http://localhost:8080/ keepalive=On
ProxyPassReverse / http://localhost:8080/
</VirtualHost>
```
I'm not including SSL in the above config - which you absolutely should enable for this - because I use [certbot](https://certbot.eff.org/), which generates the configuration for me. Add this to `/etc/apache2/sites-available/<site-name>.conf` and activate it with `a2ensite <site-name>`. Restart Apache, and assuming the domain is configured correctly, it should be accessible at the domain configured as `ServerName`.
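Something like this, assuming a Debian-style Apache layout and a config file hypothetically named `my-subdomain.conf`:
```bash
sudo a2ensite my-subdomain
sudo systemctl restart apache2
```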
View File
@ -0,0 +1,13 @@
---
title: "Using mirror repositories for code backups"
date: 2023-01-21T09:03:29
slug: using-mirror-repositories-for-code-backups
---
This is a neat feature I just discovered for git server backups. I run a local [Gitea](http://gitea.io) server that I use for development on my own projects. I've only configured it recently, so there's not much on there.
I've [mentioned in the past](https://lewisdale.dev/post/finding-my-old-hard-drive/) that I once lost access to my personal git server. It stored important content including my dissertation, which then disappeared into the void. So one thing I was quite keen on doing was making sure that I'm able to backup my git server.
It turns out that's really easy to do with Gitea. You can set up a [repository mirror](https://docs.gitea.io/en-us/repo-mirror/), which will automatically sync itself with whatever remote you configure. I've set up mine to sync with my Github, so I can push to my private git server, and on a preconfigured interval it will push all my commits up. You can set it to sync on every commit, but that felt unnecessary.
I quite like this approach, it feels very [POSSE-like](https://indieweb.org/POSSE): I retain ownership of my content, but it's syndicated to other places and I get some safety and redundancy.
View File
@ -0,0 +1,11 @@
---
title: "A bit of housekeeping"
date: 2023-10-18T13:02:59
slug: a-bit-of-housekeeping
tags: [meta]
---
I've been doing a bit of housekeeping with my blog today, specifically I've been adding tags to my posts. I've not gone through and added them historically, but now they're listed underneath a post, and clicking the link should take you through to the full post history for that tag.
Each tag also has a separate RSS/Atom/JSON feed, which is just appending `/feed/` to the end of the URL (e.g. https://lewisdale.dev/post/tag/cycling/feed/atom). Now, if anyone is using a feed reader to subscribe to my blog you can just get the posts you want.
Hopefully this makes it somewhat easier for people to read things that might interest them, but who knows.
View File
@ -0,0 +1,43 @@
---
title: "Autoposting to FireFish from WordPress"
date: 2023-10-23T07:12:53
slug: autoposting-to-firefish-from-wordpress
tags: [code, fediverse, php, wordpress]
---
Back when I was using Mastodon, autoposting my blog posts was easy - there's no shortage of extensions for handling posting to Mastodon. But I switched to FireFish a little while ago (and for better or worse, can't easily switch back without screwing up my server's ability to federate). As far as I can find, there aren't any extensions for handling cross-posting to FireFish, so now I'm doing it manually.
It turns out that every FireFish instance also comes with its own API docs, hosted at `https://<instance-url>/api-docs`, which made life a bit easier. I started off by generating an API token for this task, which you can do by going to `Settings -> API -> Generate Access Token`. Make sure you save the token, because it'll only be displayed once.
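Before wiring it into WordPress, the endpoint can be sanity-checked with a quick curl - a sketch, where the body shape mirrors what the PHP below sends:
```bash
curl -X POST https://social.lewisdale.dev/api/notes/create \
  -H "Content-Type: application/json" \
  -d '{"i": "<Your API Token>", "text": "Hello from curl"}'
```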
Then, I added a function to a custom Wordpress plugin (or you can add it to your active theme if you know it's not going to change):
```php
function publish_to_firefish($post_id, $post, $update) {
if (get_post_status($post_id) === "publish") {
$excerpt = format_post_for_mastodon("", $post);
wp_remote_post(
"https://social.lewisdale.dev/api/notes/create",
["body" => ["text" => $excerpt, "i" => "<Your API Token>"]]
);
}
}
```
The `format_post_for_mastodon` function is one I was using back when I was using the syndication plugin to share to Mastodon, and it just creates the post with the excerpt:
```php
function format_post_for_mastodon( $status, $post ) {
$status = "New post: " . $post->post_title . "\n\n";
$status .= html_entity_decode( get_the_excerpt($post) );
$status .= "\n\n" . get_permalink( $post );
return $status;
}
```
Finally, hook into the `save_post` action:
```php
add_action("save_post", "publish_to_firefish", 10, 2);
```
And that should be it! With any luck, this post should have auto-shared to the fediverse. Now I just need to get my https://brid.gy clone for FireFish working so that replies, boosts, and likes are properly syndicated over.
View File
@ -0,0 +1,19 @@
---
title: "Everything&#8217;s a bit shit, isn&#8217;t it?"
date: 2023-10-03T19:09:31
slug: everythings-a-bit-shit-isnt-it
tags: [rants]
---
This is going to be a moany post, fair warning.
But everything's a bit... shit. The Tories have had their annual conference this week, and as per have decided to rub it in our faces by holding it in Manchester (while ignoring any part of the country that isn't London completely when it comes to policy).
Whether it's Liz Truss saying fuck the environment and making suggestions on financial policy (yes, the same Liz Truss who was removed from her post for her absolutely abysmal financial policy), or Suella Braverman being just... the worst. I'm not going to repeat what she's said here, but future school classes will study it as an example of how rhetoric can give rise to fascism.
Then you've got their new "stop being mean to drivers" position, which is grating, to say the least. Poor drivers, who have the entire country's infrastructure tailored to them, need babying so they can keep driving their big polluting machines to the shops and back.
And that's just this week! On top of the usual constant stirring up of racism, homophobia, transphobia, and all the other nasty shite that seems to be on the rise. It feels inescapable at the minute.
It might be the change in the weather (which is still unseasonably, terrifyingly warm - if not a bit wet), but I'm feeling pretty fed up with it all.
There's no outcome to this post, or anything. I just needed to vent a bit.
View File
@ -0,0 +1,79 @@
---
title: "Identifying external links with CSS"
date: 2023-10-25T07:33:53
slug: identifying-external-links-with-css
tags: [code, css, eleventy]
---
Inspired by [this post on CSS In Real Life](https://css-irl.info/styling-external-links-with-attribute-selectors/), I thought I'd share how I'm styling external links on this site too.
## Marking up external links
I'm using [Eleventy](https://11ty.dev) to generate the site, and have a Markdown plugin that's borrowed from [the Eleventy-Excellent starter](https://github.com/madrilene/eleventy-excellent) (which is a site starter that really does live up to the name). This means that I have the following snippet in my Markdown plugin:
```javascript
.use(markdownItLinkAttributes, [
{
// match external links
matcher(href) {
return href.match(/^https?:\/\//);
},
attrs: {
target: '_blank',
rel: 'noreferrer noopener'
}
}
])
```
So all of my external links have `target="_blank"` attributes, meaning they'll open in a new tab/window when clicked. That makes it a bit easier to target them with CSS.
## The selector
```css
a[target="_blank"]::after {
content: " \f08e";
font-family: "font awesome 6 free";
font-style: normal;
font-variant: normal;
text-rendering: auto;
font-weight: 900;
-webkit-font-smoothing: antialiased;
font-size: var(--text-size-xs);
vertical-align: super;
}
```
I'm lazy and bad at making icons, so I'm just using FontAwesome to place the unicode icon in there. Everything else is just to size it correctly and make it not look weird.
That's pretty much all I needed, but the CSS In Real Life article had a little section on preventing orphaned icons, so I thought I'd have a go at implementing that too.
## Stopping the icons wrapping
The suggestion from the blog, to use `position: absolute` with some right-padding on the anchor tag didn't quite work for me. Instead I had to do all of that, but also make the anchor tags `display: inline-block` and `position: relative`, and then set `right: 0` on the `::after` pseudo-element. On top of that I added a bit of padding to stop things getting too squashed together on smaller displays, so this is what I've wound up with:
```css
&[target="_blank"] {
padding-right: 1.5ch;
display: inline-block;
position: relative;
}
&[target="_blank"]::after {
content: " \f08e";
font-family: "font awesome 6 free";
font-style: normal;
font-variant: normal;
text-rendering: auto;
font-weight: 900;
-webkit-font-smoothing: antialiased;
font-size: var(--text-size-xs);
vertical-align: super;
width: 2ch;
padding: 0 .3ch;
display: inline-block;
position: absolute;
right: 0;
}
```
Seems to work so far. I'm not hugely keen on the .3ch padding - I mostly got there by trying values to see what looked the least weird. I'll probably adjust it some more later.
View File
@ -0,0 +1,19 @@
---
title: "Planning my 2024 charity bike ride(s)"
date: 2023-10-05T20:00:55
slug: planning-my-2024-charity-bike-rides
tags: [cycling]
---
I never actually wrote a blog post after it, but I completed my [first cycling event](https://lewisdale.dev/post/signing-up-for-my-first-cycling-event/) in July. It went better than expected! I initially planned to do 100km, but beat my training targets and wound up doing 214km in about 7 hours 20 minutes. In total I raised [£936 for The Christie](https://www.justgiving.com/fundraising/lewis-dale-bikes)!
I followed it up with the Manchester 100 Mile ride in September, during which I managed to maintain an even faster pace than I did in the previous event, despite having a bit of a mechanical failure that meant I was stuck in my smaller chainring for half the ride. My legs ached for about 3 days afterwards, but it was worth it.
## Next year
So, what's next? A single big ride is nothing new, and I didn't actually raise any money for the second one. Instead, I think I'd like to commit to a _set_ of events. Ideally, these will add up to a fairly significant distance - currently, I'm thinking 1000km over a few different rides.
This will all coincide with my 30th birthday, so it would be good to get a good schedule prepared as a way to celebrate. And hopefully, raise money for a good cause at the same time. I've not decided on a charity yet, [Cyclists Fighting Cancer](https://www.cyclistsfc.org.uk/) is a possibility.
One takeaway from the two events I did over the summer was that those rides were _much_ more fun in a group. Both times, I fell into riding with a few people and I found it made the ride much more tolerable (as well as faster - taking turns leading meant we all got to have a bit of a break).
So I think I'd like to find a group I could do some/all of these with. If you read this blog, and like the sound of doing one or more long-distance rides in the UK next summer to raise money for charity, [get in touch](https://lewisdale.dev/links)! I'd love for people to join me!
View File
@ -0,0 +1,34 @@
---
title: "App defaults"
date: 2023-11-28T13:25:35
slug: app-defaults
---
Alright, I'll jump on [the bandwagon](https://defaults.rknight.me/). Here are my default apps:
* **Mail Client:** Apple Mail
* **Mail Server:** iCloud
* **Notes:** Obsidian
* **To-Do:** Also Obsidian
* **Photography:** Camera.app
* **Photo Management:** Photos.app
* **Calendar:** Calendar.app
* **Cloud file storage:** iCloud
* **RSS:** NetNewsWire
* **Contacts:** Contacts.app
* **Browser:** Arc
* **Chat:** WhatsApp, The Lounge
* **Bookmarks:** Arc
* **Read It Later:** My unreliable memory + open tabs
* **Word Processing:** Obsidian
* **Spreadsheets:** Google Sheets
* **Presentations:** Eleventy
* **Shopping Lists:** Notes.app
* **Meal Planning:** Obsidian
* **Budgeting & Personal Finance:** lol
* **News:** NetNewsWire
* **Music:** Apple Music
* **Podcasts:** Apple Podcasts
* **Password Management:** 1Password
Total score: 36
View File
@ -0,0 +1,25 @@
---
title: "First thoughts: Sensah Team Pro shifters"
date: 2023-11-23T23:00:32
slug: first-thoughts-sensah-team-pro-shifters
tags: [cycling]
---
My Shimano front shifter recently gave out while I was on a ride:
![A road bike leaning against a wall. The front shifter is stuck inwards at an awkward angle](./src/images/broken-shifter.jpeg)
Not an ideal situation, really. But apparently it's a pretty common failure on the 105 shifters - previously I was able to bodge it by gluing a piece of plastic to the shifting arm, but now it looks like the internal spring has gone.
So, not wanting to fork out £200 for a new set of shifters, I figured I'd try out a set I've had my eye on for a while: The [Sensah Team Pro 11-speed shifters](https://www.aliexpress.com/item/1005002598141724.html) from AliExpress.
Most of the Sensah shifters are Shimano-compatible, with the Empire shifters being the exception, as they're SRAM-only. But the reviews online were favourable, and given it came to a total of £65 including delivery I thought "why not?".
They took about 5 days to arrive and honestly, I was pretty impressed.
![A pair of road bike shifters in the box](./src/images/WhatsApp-Image-2023-11-11-at-10.19.25.jpeg)
They feel really sturdy, and if I didn't know otherwise I'd think they were far more expensive than they actually cost. Fitting them was pretty simple too - I had to reroute my cabling slightly because the shifting cable holes are positioned differently. Once that was done though, they indexed really nicely.
![Road bike with new shifters applied. The "TEAM PRO" logo is visible](./src/images/bike-new-shifters.jpeg)
I've not taken them for a ride yet - I've still got a little bit to do on the bike first, like fixing the brakes. I think they'll take a bit of getting used to - they've got a single lever, so you do a short tap to shift down, a longer tap to shift up, and then an extra-long tap to shift 3 gears up (on the rear, anyway). But so far I'm really happy with them.
View File
@ -0,0 +1,42 @@
---
title: "Using Obsidian for meal planning"
date: 2023-11-30T21:26:43
slug: using-obsidian-for-meal-planning
---
In my [App defaults post](https://lewisdale.dev/post/app-defaults), I mentioned I use [Obsidian](https://obsidian.md) for various tasks, one of them being meal planning, so I thought I'd share how I actually manage that.
## Requirements
Obsidian, obviously.
Secondly, it relies on having the [Dataview](https://github.com/blacksmithgu/obsidian-dataview) plugin installed. Dataview is a really neat plugin that provides a simple scripting language over your Obsidian vault, that you can use to query and display data from your files.
## Storing the data
My Obsidian vault is also where I share my recipes. Each recipe is stored under the `recipes/` directory, and when I add a new one I add tags that I can then query against. For example, my recipe for a Broccoli and Cheddar Soup is tagged with `#easy` and `#lunch`, because it's easy to make and I can take it with me for lunch. Truly, revolutionary stuff.
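So a recipe note is just a Markdown file with tags somewhere Dataview can see them - a hypothetical example (the tags are the only part the query below cares about):
```txt
# Broccoli and Cheddar Soup

#recipe #easy #lunch

A big pot of soup that freezes well and reheats nicely for lunch.
```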
## Querying the data
I'll admit, I didn't come up with this query myself, instead I adapted it from [this Reddit comment](https://www.reddit.com/r/ObsidianMD/comments/zoyviu/comment/j0rw5zr). Here's the full query I use for generating my weekly meal plan:
```js
const tags = ["#recipe", "#lunch", "#easy"]
const recipes = dv.pages(tags.join(' and '));
const maxItems = Math.min(recipes.length, 2);
const randomNum = DateTime.now().toFormat("WW") % recipes.length;
const items = Array(maxItems).fill().map((_, i) => recipes[((randomNum + i) % recipes.length)])
dv.list(items.map((item) => (item.file.link)))
```
It's fairly straightforward. I query for every item that's tagged with `#recipe`, `#easy` and `#lunch`. Then, I pick a "random" number (it's not random - it's the week of the year modulo the number of recipes). As the Reddit comment points out, this way I have repeatable results for each week of the year - otherwise my meal plan would change every time I opened it.
Then, I use that number as an index to select at most two of the recipes that match the tags, and list them. Easy!
![Screenshot showing a heading that says "Meal Plan". The subheading lists two meals under "Lunch": Stir-fried chicken & vegetables, and Lemon & Garlic Butter Shrimp](./src/images/Screenshot-2023-11-30-at-21.24.16.png)
And that's more-or-less it. I'm only generating my lunch meal plan at the minute - I have one for dinner but I've not got many recipes, so unless I want to eat the same thing every night until the end of time it's best to ignore it.
View File
@ -0,0 +1,13 @@
---
title: "Winter&#8217;s definitely here"
date: 2023-11-29T07:19:54
slug: winters-definitely-here
tags: [cycling]
---
Had the first properly wintry commute in this morning (-3ºC but also damp). I knew it was going to be cold, so I made sure to wear my extra-thick gloves (an Aldi special - they worked perfectly and are definitely worth picking up next time they're in the magical middle aisle).
It's a good job I did, really. I got to work and my gloves (and bike, shoes, jacket, and face) were coated in a layer of frost:
![A pair of cycling gloves. The finger and thumb are covered in a thin layer of frost](./src/images/frosty_gloves.jpg)
The rest of my winter gear worked reasonably well. The only thing I need to get is something to cover my ears - I've tried snoods and balaclavas. Both make my glasses fog up, and the latter also makes it look like I'm trying to hide my face (and I'm not _that_ ugly).
View File
@ -0,0 +1,147 @@
---
title: "Advent of Code 2023: Day Eight"
date: 2023-12-08T10:20:38
slug: advent-of-code-2023-day-eight
tags: [advent-of-code-2023]
---
Time for Day Eight! As always, the code is available [on Git](https://git.lewisdale.dev/lewis/advent-of-code-2023), and the other posts are under the [#AdventOfCode2023 tag](https://lewisdale.dev/post/tag/advent-of-code-2023)
## Part One
So, now we have a map that can take us across the desert. The first line is a series of instructions, either "R" (for right) or "L" (for left). Below that is a list of location names, each mapped to the two locations that the left and right branches take you to, like this:
```txt
RL
AAA = (BBB, CCC)
BBB = (DDD, EEE)
CCC = (ZZZ, GGG)
DDD = (DDD, DDD)
EEE = (EEE, EEE)
GGG = (GGG, GGG)
ZZZ = (ZZZ, ZZZ)
```
The task is to find out, by following the instructions, how many steps it takes to get from `AAA` to `ZZZ`.
At first, I went down the path of creating a tree data structure, because that's what this sort-of looks like, and then using that. It worked fine for the tests but then fell over, because the actual input had node names that hadn't already been assigned to a parent, so I couldn't construct it.
Then I realised I was overcomplicating things, and I could just use `Record<string, [string, string]>` and brute-force things:
```javascript
const patternParser = anyCharOf("LR").pipe(manyTill(newline().pipe(exactly(2))));
const nodeNameParser = uniLetter().pipe(or(uniDecimal()), exactly(3), stringify());
const childParser = nodeNameParser.pipe(manySepBy(", "), exactly(2), between("(", ")"));
const nodeParser = nodeNameParser.pipe(then(childParser.pipe(between(" = ", whitespace()))))
const parser = patternParser.pipe(then(nodeParser.pipe(manySepBy(whitespace()))));
type Maybe<T> = T | undefined;
type Instruction = "L" | "R";
type NodeName = string;
type NodeChildren = [Maybe<NodeName>, Maybe<NodeName>];
export class DesertMap {
private readonly pattern: Instruction[];
private map: Record<NodeName, NodeChildren> = {};
constructor(input: string) {
const [pattern, nodes] = parser.parse(input).value;
this.pattern = pattern as Instruction[];
for (const [name, [[leftNode, rightNode]]] of nodes) {
if (!this.map[name]) {
this.map[name] = [undefined, undefined];
}
const children = [leftNode, rightNode];
this.map[name] = children as NodeChildren;
}
}
public stepsToZ(from: string): number {
let step = 0;
let curr = from;
while (!curr.endsWith('Z')) {
const instruction = this.pattern[step % this.pattern.length];
const [left, right] = this.map[curr];
if (instruction === "L" && left) {
curr = left;
} else if (instruction === "R" && right) {
curr = right;
}
if (!curr) return 0;
step++;
}
return step;
}
}
```
And that worked nicely - and didn't even run slowly. On to Part 2!
## Part Two
Now things get interesting. Actually, this map is for ghosts 👻! And naturally, ghosts have the power to follow multiple roads at once to find a destination (I must have missed that bit in school)! So any node that ends in the letter `A` is a starting node, and any that ends in the letter `Z` is an end-node.
My first pass just tried to brute-force it, like I did with part one:
```javascript
public isComplete(keys: string[]): boolean {
return keys.every(k => k.endsWith('Z'));
}
public findCommonSteps(): number {
let step = 0;
let keys = Object.keys(this.map).filter(k => k.endsWith('A'));
while (!this.isComplete(keys)) {
const instruction = this.pattern[step % this.pattern.length];
keys = keys.map(key => {
const [left, right] = this.map[key];
if (instruction === "L" && left) {
return left;
} else if (instruction === "R" && right) {
return right;
}
return key;
})
step++;
    }
    return step;
}
```
This... didn't work. The tests passed, so I've no doubt it would have been eventually correct, but I'd have died of old age before it ended, most likely.
I puzzled for a while on how to do this, but to be honest I was stumped. Luckily, one of my colleagues helpfully pointed me in the direction of using the Lowest Common Multiple of each path's individual step count - which works because each ghost's route settles into a fixed-length cycle through its end node - and that worked:
```javascript
const gcd = (a: number, b: number): number => {
if (b === 0) return a;
return gcd(b, a % b);
}
const lcm = (a: number, b: number): number => {
const product = a * b;
return product / gcd(a, b);
}
public ghostStepsToZ(): number {
let keys = Object.keys(this.map).filter(key => key.endsWith('A'));
return keys.map(key => this.stepsToZ(key)).reduce(lcm);
}
```
And that's Day Eight done!
View File
@ -0,0 +1,143 @@
---
title: "Advent of Code 2023: Day Eleven"
date: 2023-12-11T10:18:03
slug: advent-of-code-2023-day-eleven
tags: [advent-of-code-2023]
---
More from [Advent of Code](https://adventofcode.com/2023). Checkout the [other posts](https://lewisdale.dev/post/tag/advent-of-code-2023), or the [Git repository](https://git.lewisdale.dev/lewis/advent-of-code-2023).
I didn't finish Day Ten; I'll add it to my backlog and maybe go and do the ones I've missed when this is all over.
## Part One
We've got a map of the cosmos! Galaxies are `#` characters, and we need to work out the distances between every pair of galaxies. There's a catch though - completely empty rows or columns are doubled, thanks to universal expansion. Here's our input:
```txt
...#......
.......#..
#.........
..........
......#...
.#........
.........#
..........
.......#..
#...#.....
```
For Part One, I parsed the input, and then transformed the matrix to include the expanded rows and columns:
```javascript
class Observatory {
private grid: string[][] = [];
private expandedGrid: string[][] = [];
    constructor(input: string) {
        this.grid = input.split('\n').map(line => line.split(''));
        // Start from a copy of the original grid, then expand it in place
        this.expandedGrid = this.grid.map(row => [...row]);
        this.expandGrid();
    }

    private expandGrid() {
        const columns = Range(0, this.grid[0].length).filter(column => this.grid.every(row => row[column] === '.'));
        const rows = Range(0, this.grid.length).filter(row => this.grid[row].every(column => column === '.'));

        columns.forEach((column, index) => {
            // Insert a single '.' into every row at the expanded column
            this.expandedGrid.forEach(row => row.splice(column + index, 0, '.'));
        });

        rows.forEach((row, index) => {
            this.expandedGrid.splice(row + index, 0, new Array(this.expandedGrid[0].length).fill('.'));
        });
    }
}
```
I'm offsetting the rows and columns by their position in the range to account for the fact that inserting previous columns will alter the indexes.
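To make that concrete, here's a tiny standalone sketch of the offset trick (not the post's actual class):
```javascript
// Empty columns at original indexes 2 and 5
const row = ['a', 'b', 'c', 'd', 'e', 'f'];
const emptyColumns = [2, 5];

emptyColumns.forEach((column, index) => {
  // every earlier insert shifts later indexes right by one,
  // so add `index` to compensate
  row.splice(column + index, 0, '.');
});

console.log(row); // [ 'a', 'b', '.', 'c', 'd', 'e', '.', 'f' ]
```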
Then I just get every pair of galaxies, and calculate the distance between them using the Manhattan Distance:
```javascript
public get shortestPaths(): number {
const distances = this.pairs.map(pair => this.distanceBetween(pair[0], pair[1]));
return distances.reduce((sum, distance) => sum + distance, 0);
}
private get pairs(): GalaxyPair[] {
const galaxies = this.expandedGrid.reduce((galaxies, row, rowIndex) => {
row.forEach((galaxy, columnIndex) => {
if (galaxy === '#') {
galaxies.push([rowIndex, columnIndex]);
}
});
return galaxies;
}, [] as Position[]);
return galaxies.reduce((pairs, galaxy, index) => {
return pairs.withMutations(pairs => {
galaxies.slice(index + 1).forEach(otherGalaxy => {
pairs.add([galaxy, otherGalaxy]);
});
})
}, Set<GalaxyPair>()).toArray();
}
public distanceBetween(galaxyOne: Position, galaxyTwo: Position): number {
return Math.abs(galaxyOne[0] - galaxyTwo[0]) + Math.abs(galaxyOne[1] - galaxyTwo[1]);
}
```
And that worked! On to Part Two!
## Part Two
Oh no, it turns out that the spaces didn't expand by a single row/column, but in fact by one million!
Obviously, trying to still mutate the array was never going to work, so instead I just store a list of the columns and rows that should be expanded. Then, when comparing the distances, I get all of the expanded columns and rows that sit between the two galaxies, and multiply the total by the amount of expansion. I then add that number to the Manhattan Distance from Part One:
```javascript
class Observatory {
private grid: string[][] = [];
private expandedRows: number[] = [];
private expandedColumns: number[] = [];
private expandGrid() {
this.expandedColumns = Range(0, this.grid[0].length).filter(column => this.grid.every(row => row[column] === '.')).toArray();
this.expandedRows = Range(0, this.grid.length).filter(row => this.grid[row].every(column => column === '.')).toArray();
}
private range = (a: number, b: number): Seq.Indexed<number> => {
return Range(Math.min(a, b), Math.max(a, b) + 1);
}
public distanceBetween(galaxyOne: Position, galaxyTwo: Position): number {
const expansion = 1_000_000;
const xRange = this.range(galaxyOne[0], galaxyTwo[0]).filter(row => this.expandedRows.includes(row)).toArray().length;
const yRange = this.range(galaxyOne[1], galaxyTwo[1]).filter(column => this.expandedColumns.includes(column)).toArray().length;
const expansions = expansion * (xRange + yRange)
return Math.abs(galaxyOne[0] - galaxyTwo[0]) + Math.abs(galaxyOne[1] - galaxyTwo[1]) + expansions;
}
}
```
I first tested this against the original input and expansion to make sure it worked, and it did! So then I ran it using the expansion value of 1 million aaand... nope. Too high.
After a bit of head-scratching and investigation, I realised I had an off-by-one error. I need to substitute 1 row/column for a million, not add 1 million to it. So, if I reduce my expansion to `999999`, everything works!
```javascript
public distanceBetween(galaxyOne: Position, galaxyTwo: Position): number {
const expansion = 999_999;
const xRange = this.range(galaxyOne[0], galaxyTwo[0]).filter(row => this.expandedRows.includes(row)).toArray().length;
const yRange = this.range(galaxyOne[1], galaxyTwo[1]).filter(column => this.expandedColumns.includes(column)).toArray().length;
const expansions = expansion * (xRange + yRange)
return Math.abs(galaxyOne[0] - galaxyTwo[0]) + Math.abs(galaxyOne[1] - galaxyTwo[1]) + expansions;
}
```
That's Day Eleven done with!
View File
@ -0,0 +1,73 @@
---
title: "Advent of Code 2023: Day Four"
date: 2023-12-04T08:24:54
slug: advent-of-code-2023-day-four
tags: [advent-of-code-2023]
---
Read the [previous Advent of Code posts](https://lewisdale.dev/post/tag/advent-of-code-2023), or checkout the [Git repository](https://git.lewisdale.dev/lewis/advent-of-code-2023).
Day Four was _much_ easier than Day Three, and I'm actually quite pleased with my solution.
## Part One
Given a list of "scratchcards" that look like `Card ID: list of winning numbers | list of scratched numbers`, calculate the score, where the score is 1 if there is 1 match, and then doubles for each subsequent match afterwards.
I realised that it's basically just powers of 2 - `2^0, 2^1` etc. So, I parse each scratchcard into an object, and then to calculate the score I do:
```javascript
get matches() {
return this.numbers.filter(number => this.winningNumbers.includes(number));
}
get score() {
if (!this.matches.length) return 0;
return Math.pow(2, this.matches.length - 1);
}
```
And then just do `Array.prototype.reduce` over my list of scratchcards to get the total score. Pretty tidy, I'm happy with it.
## Part Two
Now the scoring changes. If a scratchcard is a winner, instead of just having a score, it takes the next `n` scratchcards from the list, where `n` is the number of winning numbers matched. If one of the "copied" scratchcards later gets children, that also has to be reflected in the earlier copies (basically, we need to maintain references).
I just made each scratchcard a tree, essentially, with an empty array of children scratchcards, and then pulled references from my Scratchcard list using `Array.prototype.slice`. I then recursively calculated the total size of my scratchcard set, where each scratchcard implements a `size` getter, which returns `1 + sum of children sizes`.
```javascript
class Scratchcard {
...
public setChildren(children: Scratchcard[]) {
this.children = children;
}
get size(): number {
return 1 + this.children.reduce((totalSize, child) => totalSize + child.size, 0);
}
}
export class ScratchcardSet {
private readonly scratchcards: Scratchcard[];
constructor(inputs: string[]) {
this.scratchcards = inputs.map(input => new Scratchcard(input));
this.scratchcards.forEach((scratchcard, index) => {
if (scratchcard.isWinner) {
const children = this.scratchcards.slice(index + 1, index + 1 + scratchcard.matches.length);
scratchcard.setChildren(children);
}
});
}
get totalScore() {
return this.scratchcards.reduce((total, scratchcard) => total + scratchcard.score, 0);
}
get length() {
return this.scratchcards.reduce((totalSize, scratchcard) => totalSize + scratchcard.size, 0);
}
}
```
And that's Day Four complete!
View File
@ -0,0 +1,71 @@
---
title: "Advent of Code 2023: Day Nine"
date: 2023-12-09T07:49:18
slug: advent-of-code-2023-day-nine
tags: [advent-of-code-2023]
---
On to Day Nine of [Advent of Code](https://adventofcode.com). As always, the code is available [on Git](https://git.lewisdale.dev/lewis/advent-of-code-2023), and the other posts are under the [#AdventOfCode2023 tag](https://lewisdale.dev/post/tag/advent-of-code-2023).
## Part One
Something something exposition. We've got a list of lists of numbers that represent how a value changes over time, and we have to calculate the next value in the sequence.
We do this by repeatedly finding the differences between consecutive values, until the differences are all the same, and then working back up, e.g. `0, 3, 6, 9, 12, 15` becomes `3, 3, 3, 3, 3`, which is then `0, 0, 0, 0`. So the next value in the `3`s list is `3+0`, and the next value in the original sequence is `15+3`, so `18`.
My function to get the next value in the sequence is pretty simple. Really, we don't need to work out when it becomes all zeroes, just when every value is the same. So I apply the function recursively, and add the result to the last value in the sequence:
```javascript
getNextValueInSequence(seq: number[]): number {
if (new Set(seq).size === 1) return seq[0];
const differences = seq.reduce((acc: number[], curr, i, arr) => {
if (i === 0) return acc;
acc.push(curr - arr[i - 1]);
return acc;
}, []);
return this.getNextValueInSequence(differences) + seq[seq.length - 1];
}
```
Then to calculate the totals I just parse the input (which is simple enough that I didn't bother with using `parjs` this time, I just split each line by `' '`, and map `Number` over the result). Then I call `reduce` over the inputs with `getNextValueInSequence` and add the results:
```javascript
export class Oasis {
private readonly sequences: number[][];
constructor(input: string) {
this.sequences = input.split('\n').map(line => line.split(' ').map(Number));
}
public getTotalOfNextValuesInSequences(): number {
return this.sequences.reduce((acc, curr) => acc + this.getNextValueInSequence(curr), 0);
}
}
```
And that was enough to complete Part One.
## Part Two
Now we need to time travel, and work out what the _previous_ value in the sequence was.
This was as simple as taking my original `getNextValueInSequence` and changing the return line to subtract the recursive result from `seq[0]`, rather than adding it to the last element:
```javascript
public getPreviousValueInSequence(seq: number[]): number {
if (new Set(seq).size === 1) return seq[0];
const differences = seq.reduce((acc: number[], curr, i, arr) => {
if (i === 0) return acc;
acc.push(curr - arr[i - 1]);
return acc;
}, []);
return seq[0] - this.getPreviousValueInSequence(differences);
}
public getTotalOfPreviousValuesInSequences(): number {
return this.sequences.reduce((acc, curr) => acc + this.getPreviousValueInSequence(curr), 0);
}
```
And that's Day Nine done! Much easier than yesterday's task.
View File
@ -0,0 +1,97 @@
---
title: "Advent of Code 2023: Day One"
date: 2023-12-01T09:59:44
slug: advent-of-code-2023-day-one
tags: [advent-of-code-2023]
---
[Advent of Code](https://adventofcode.com) is here, and once again I'm going to attempt it. Last year I got to about Day 11 before giving up. This year, I'll _try_ and beat that (but no promises). You can follow my progress with the [Advent of Code tag](https://lewisdale.dev/post/tag/advent-of-code-2023), or by subscribing to the [RSS feed](https://lewisdale.dev/post/tag/advent-of-code-2023/feed/), and the code is on my [Git](https://git.lewisdale.dev/lewis/advent-of-code-2023).
As expected, this post (and the subsequent ones) will contain lots of spoilers for Advent of Code. Read at your own peril.
## Language
Originally, I was planning to do it in PICO-8. But that sounded too much like hard work, so I'm doing it in Typescript.
## Part One
The first part was relatively easy: given a bunch of strings that may or may not contain numbers, pick the first and last number, smoosh them together, and then sum them all. An example string might be `gasdad15asd5`, which should produce `15`.
Easy-peasy. I just use a really simple Regex, and then pick the results out:
```js
function parseLine(line: string): number {
const pattern = /\d/g;
const matches = line.match(pattern);
if (!matches) return 0;
const valueStr = `${matches[0]}${matches[matches.length - 1]}`;
return parseInt(valueStr);
}
function calculate() {
const lines = fs.readFileSync('./path/to/input.txt').toString('utf-8').split('\n');
console.log(lines.reduce((total, line) => total + parseLine(line), 0));
}
```
That gave me the correct answer, and I was able to move onto Part 2.
## Part Two
This was a doozy. Now, we also need to parse the written forms of the numbers, e.g. `one`, `two`, `three`. I extended my Regex to capture these too, and then added a parser function that would convert the written number to the digit:
```js
function parseDigit(digit: string): string {
switch (digit) {
case "one": return "1";
case "two": return "2";
case "three": return "3";
case "four": return "4";
case "five": return "5";
case "six": return "6";
case "seven": return "7";
case "eight": return "8";
case "nine": return "9";
default: return digit;
}
}
function parseLine(line: string): number {
const pattern = /\d|one|two|three|four|five|six|seven|eight|nine/g;
const matches = line.match(pattern);
if (!matches.length) return 0;
const converted = matches.map(parseDigit);
const valueStr = `${converted[0]}${converted[converted.length - 1]}`;
return parseInt(valueStr);
}
```
Everything looked good, the tests passed but... nope. Wrong result.
### Regex fun
It turned out that the test cases had no examples for when the digits overlap. For example, `oneight` should be `18`, but I was getting `11`. That meant that I was always getting the wrong result. This is because standard global pattern matching _consumes_ the string - so once I've parsed `one`, the remaining string is `ight`, which gets discarded.
To get around this, I had to add a look-ahead with capture group to my Regex, and then use `string.matchAll()`, rather than `string.match()` - because `string.match()` ignores capture groups:
```js
function parseLine(line: string): number {
const pattern = /(?=(\d|one|two|three|four|five|six|seven|eight|nine))/g;
const matches = line.matchAll(pattern);
if (!matches) return 0;
const converted = [...matches].map(match => parseDigit(match[1])); // matches is an iterator of RegExpMatchArrays; match[1] is the captured group
const valueStr = `${converted[0]}${converted[converted.length - 1]}`;
return parseInt(valueStr);
}
```
And finally, that worked. This made it sound easier than it was, in reality I spent a good 30-40 minutes scratching my head, and even switched languages (originally I was using Rust, like last year). But anyway, it's done, and at least I can move onto Day Two.
View File
@ -0,0 +1,147 @@
---
title: "Advent of Code 2023: Day Seven"
date: 2023-12-07T13:21:34
slug: advent-of-code-2023-day-seven
tags: [advent-of-code-2023]
---
Back to Advent of Code! This post contains spoilers. You can see the rest of the [Advent of Code posts](https://lewisdale.dev/post/tag/advent-of-code-2023), or checkout the [Git repository](https://git.lewisdale.dev/lewis/advent-of-code-2023).
## Part One
You're playing a game of cards! Each game looks like a set of hands, with an associated bet:
```txt
32T3K 765
T55J5 684
KK677 28
KTJJT 220
QQQJA 483
```
Each game of cards is scored based on the value of the hand, e.g. "Five of a kind" is the highest-scoring card. In the event that two hands have the same score, the individual cards are compared until a higher-scoring card is found.
The task is to order the hands by their scores, and then multiply the "bets" by their individual ranks. For example, if I have the highest-scoring hand, I'd have the top rank (rank 5 in this case), and I'd multiply my bet by that amount. What's the total winnings, calculated by multiplying each bet by its rank?
So to begin with, I get my trusty parsing library out and write the world's most pointless parser:
```javascript
const cardParser = anyChar().pipe(manyTill(space()), stringify());
const bidParser = int().pipe(between(whitespace()));
const parser = cardParser.pipe(then(bidParser), manySepBy(whitespace()));
const rows: [string, number][] = parser.parse(input).value;
```
Then I map each parsed value to a `CamelCard`, which also calculates the score ahead of time:
```javascript
export class CamelCard {
private hand: Record<string, number>;
public readonly score: number;
constructor(protected readonly card: string) {
this.hand = this.card.split('').reduce((cards, char) => {
if (!cards[char]) {
cards[char] = 0;
}
cards[char] += 1;
return cards;
}, {} as Record<string, number>)
this.score = this.calculateScore();
}
private calculateScore(): number {
const cards = Object.values(this.hand).sort((a, b) => b-a);
if (isEqual(cards, [1, 1, 1, 1, 1])) return 1;
if (isEqual(cards, [2, 1, 1, 1])) return 2;
if (isEqual(cards, [2, 2, 1])) return 3;
if (isEqual(cards, [3, 1, 1])) return 4;
if (isEqual(cards, [3, 2])) return 5;
if (isEqual(cards, [4, 1])) return 6;
if (isEqual(cards, [5])) return 7;
return 0;
}
}
```
Basically, I bucket each found "card" into a record, and count the number of times it occurs. Then to get the score, I just order the values by descending count, and compare the array to what I would expect for each score.
Then to compare them, I check the scores. If they're different, I just return the difference. If they're equal, I iterate over the hand and look up the index of the score in an ordered array of the cards, and just compare the indexes:
```javascript
const CardLetterScores = [ '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K', 'A'];
public compare(other: CamelCard): number {
const diff = this.score - other.score;
if (diff !== 0) return diff;
for (const [a, b] of zip(this.card.split(''), other.card.split('')) as [string, string][]) {
if (a !== b) {
return CardLetterScores.indexOf(a) - CardLetterScores.indexOf(b)
}
}
return 0;
}
```
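A quick check of the tie-break (a sketch): `KK677` and `KTJJT` are both two pair, so the comparison falls through to the second card, where `K` outranks `T`.
```typescript
const a = new CamelCard("KK677");
const b = new CamelCard("KTJJT");
console.log(a.compare(b) > 0); // true - equal scores, but K beats T on the second card
```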
This lets me then sort my hands, and compute the winnings:
```javascript
get winnings(): number {
this.cards.sort(([a], [b]) => a.compare(b));
return this.cards.reduce((total, [_, value], index) => total + (value * (index + 1)), 0);
}
```
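That getter lives on a containing class I haven't shown; a minimal sketch of its shape (name assumed), holding `[CamelCard, bid]` pairs:
```typescript
// Hypothetical container class; only the getter appears above.
class CamelCardsGame {
  constructor(private readonly cards: [CamelCard, number][]) {}

  get winnings(): number {
    this.cards.sort(([a], [b]) => a.compare(b));
    return this.cards.reduce((total, [_, value], index) => total + (value * (index + 1)), 0);
  }
}
```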
Part one done!
## Part Two
Okay now the `J` cards are jokers, which are now the lowest-valued cards in the hand when it comes to a direct comparison. But, they can also be redistributed within the hand to become "any" card, so that you can have a stronger hand. Basically they're a valueless wildcard.
So to do this, I just move the letter `J` to the start of my `CardLetterScores` array, which handles the value case. Then to redistribute them, I pull the J's out of the hand, find the card with the next-highest number of instances, and give it all the J's. I do this using reduce, initialised with a `J` key and a 0-value to handle the case where there are only J's in the hand. That way we don't accidentally double-up the J's if that's all there is:
```javascript
const CardLetterScores = ['J', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'Q', 'K', 'A'];
private redistributeJ(): void {
if ('J' in this.hand) {
const js = this.hand.J;
const withoutJ = omit(this.hand, 'J') as Record<string, number>;
const [mostCommon, mostCommonValue] = Object.entries(withoutJ).reduce(([maxKey, maxValue], [key, value]) => {
if (value > maxValue) return [key, value];
return [maxKey, maxValue];
}, ['J', 0]);
withoutJ[mostCommon] = mostCommonValue + js;
this.hand = withoutJ;
}
}
private calculateScore(): number {
this.redistributeJ();
const cards = Object.values(this.hand).sort((a, b) => b-a);
if (isEqual(cards, [1, 1, 1, 1, 1])) return 1;
if (isEqual(cards, [2, 1, 1, 1])) return 2;
if (isEqual(cards, [2, 2, 1])) return 3;
if (isEqual(cards, [3, 1, 1])) return 4;
if (isEqual(cards, [3, 2])) return 5;
if (isEqual(cards, [4, 1])) return 6;
if (isEqual(cards, [5])) return 7;
return 0;
}
```
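Two quick checks of the joker handling (a sketch): one hand where the joker folds into the most common card, and the all-jokers edge case that the `['J', 0]` seed exists for.
```typescript
console.log(new CamelCard("QQQJA").score); // 6 - the J joins the queens, giving [4, 1]
console.log(new CamelCard("JJJJJ").score); // 7 - nothing to fold into, so the ['J', 0] seed keeps [5]
```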
And that was Part Two done. Wasn't too difficult, really. My `calculateScore` function is a bit so-so, but it's _fine_, and it runs fast enough anyway.

View File

@ -0,0 +1,88 @@
---
title: "Advent of Code 2023: Day Six"
date: 2023-12-06T08:28:43
slug: advent-of-code-2023-day-six
tags: [advent-of-code-2023]
---
Back to Advent of Code! This post contains spoilers. You can see the rest of the [Advent of Code posts](https://lewisdale.dev/post/tag/advent-of-code-2023), or check out the [Git repository](https://git.lewisdale.dev/lewis/advent-of-code-2023).
I missed yesterday's Advent of Code, so I'll go back and do it later. But, until then, here's Day Six.
## Part One
We're having a boat race! We've got some input that looks like:
```txt
Time: 7 15 30
Distance: 9 40 200
```
If we pair each column up, we get a Time/Distance pairing that tells us how long the race lasts, and the record distance we need to beat. We can "charge up" our boat by holding down the button, which gives us an extra 1mm/ms of velocity for every millisecond we hold it. The task: work out how many different ways there are to win each race, and then multiply the results together.
Firstly, I'm using [ParJS](https://github.com/GregRos/parjs) as an input parser, because I wanted something similar to [Rust's Nom](https://docs.rs/nom/latest/nom/) that I used last year.
Creating the parser was simple enough:
```javascript
const timeName = string('Time:').pipe(then(spaces1()));
const distanceName = string('Distance:').pipe(then(spaces1()));
const timeParser = int().pipe(manySepBy(whitespace().pipe(exactly(2))), between(timeName, whitespace()));
const distanceParser = int().pipe(manySepBy(whitespace().pipe(exactly(2))), between(distanceName, whitespace()));
const parser = timeParser.pipe(then(distanceParser));
export class BoatRace {
private races: Race[] = [];
constructor(input: string) {
const [times, distances] = parser.parse(input).value;
this.races = zip(times, distances) as Race[];
}
}
```
Then I just used Range from [immutable.js](https://immutable-js.com/) to create all of the values between `0` and `time`, and filtered it using a function that tested whether the new velocity was enough to beat the distance with the remaining time. The resulting array length was my number of possible solutions:
```javascript
numberOfWinningMethods = (race: Race): number => {
const [time, distance] = race;
const canWin = (holdingTime: number) => ((time - holdingTime) * holdingTime) > distance;
const range = Range(0, time).filter(canWin).cacheResult();
return range.size || 0;
}
public totalNumberOfWaysToBeatRace(): number {
return this.races.map(this.numberOfWinningMethods).reduce((total, value) => total * value, 1);
}
```
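A quick check against the sample races (a sketch; `boatRace` here stands for an instance built from the sample input): the three races can be won in 4, 8, and 9 ways, multiplying to 288.
```typescript
// boatRace is assumed to be a BoatRace constructed from the sample input above.
console.log(boatRace.numberOfWinningMethods([7, 9]));    // hold 2..5   -> 4 ways
console.log(boatRace.numberOfWinningMethods([15, 40]));  // hold 4..11  -> 8 ways
console.log(boatRace.numberOfWinningMethods([30, 200])); // hold 11..19 -> 9 ways
console.log(boatRace.totalNumberOfWaysToBeatRace());     // 4 * 8 * 9 = 288
```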
## Part Two
Oh no, bad kerning strikes again! It turns out that it's not multiple races - it's a single race and the whitespace is just throwing us off. So instead of `7 15 30`, the time is actually `71530`.
All I actually had to change here was my parser:
```javascript
const numbersParser = anyCharOf("0123456789").pipe(manySepBy(whitespace().pipe(exactly(2))), stringify());
const timeParser = numbersParser.pipe(between(timeName, whitespace()));
const distanceParser = numbersParser.pipe(between(distanceName, whitespace()));
export class BoatRace {
private readonly race: Race;
constructor(input: string) {
const [times, distances] = parser.parse(input).value;
this.race = [parseInt(times), parseInt(distances)];
}
public totalNumberOfWaysToBeatRace(): number {
return this.numberOfWinningMethods(this.race);
}
}
```
And that was it! It ran first time! I'm sure there are more efficient solutions that you can get by working out the upper & lower bounds, but if I wanted to do maths by hand I wouldn't have bought a computer.
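For the curious, the bounds-based approach would look something like this (a sketch, not what I actually ran): a hold time `h` wins when `(time - h) * h > distance`, so the winning hold times sit strictly between the roots of `h^2 - time*h + distance = 0`.
```typescript
const countWins = (time: number, distance: number): number => {
  if (time * time <= 4 * distance) return 0;    // no real roots -> no winning hold times
  const root = Math.sqrt(time * time - 4 * distance);
  const lo = Math.floor((time - root) / 2) + 1; // first integer strictly above the lower root
  const hi = Math.ceil((time + root) / 2) - 1;  // last integer strictly below the upper root
  return Math.max(0, hi - lo + 1);
};

console.log(countWins(71530, 940200)); // 71503 - the sample's single merged race
```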

View File

@ -0,0 +1,107 @@
---
title: "Advent of Code 2023: Day Three"
date: 2023-12-03T12:23:59
slug: advent-of-code-2023-day-three
tags: [advent-of-code-2023]
---
As before, this post contains spoilers. You can follow all of the Advent of Code posts using the [Advent of Code tag](https://lewisdale.dev/post/tag/advent-of-code-2023/), and the code is available on [Git](https://git.lewisdale.dev/lewis/advent-of-code-2023).
Thanks Day Three, I hate it.
## Part One
For part one, we had to take a schematic of an engine, with symbols for components, and numbers for "part numbers", and then find all of the part numbers adjacent to a component, and sum them together.
I really struggled to get the right data structure for this while still being able to look it up properly. In the end, I matched every integer and stuck it in a 2D array, at each index that int would occupy (e.g. '245' starting at position 3, line 3 would be in indexes `[3][3]`, `[3][4]`, and `[3][5]`).
Then I found each symbol, checked the adjacent spots for numbers, removed the duplicates, and summed them. The code is _horrible_.
```javascript
export class Engine {
constructor(private readonly parts: number[][], private readonly schematic: string[][]) {
}
public static create(input: string): Engine {
const lines = input.split('\n').filter(Boolean);
const partRegex = /\d+/g;
const symbolRegex = /[^a-zA-Z\d\.]/g;
const parts : number[][] = new Array(lines.length).fill(0).map(() => new Array(lines[0].length));
const symbols: string[][] = new Array(lines.length).fill(0).map(() => new Array(lines[0].length));
lines.forEach((line, lineNumber) => {
matchAllAndThen(line, partRegex, (match, index) => {
const parsedNumber = parseInt(match, 10);
for (let i = index; i < index + match.length; i++) {
parts[lineNumber][i] = parsedNumber;
}
});
});
lines.forEach((line, lineNumber) => {
matchAllAndThen(line, symbolRegex, (match, index) => symbols[lineNumber][index] = match);
});
return new Engine(parts, symbols);
}
public sumPartNumbers(): number {
const partsList = this.schematic.flatMap((row, rowIndex) =>
row.map((symbol, index) => {
if (!symbol) return symbol;
const partIndex = [
[rowIndex - 1, index - 1],
[rowIndex - 1, index],
[rowIndex - 1, index + 1],
[rowIndex, index - 1],
[rowIndex, index + 1],
[rowIndex + 1, index - 1],
[rowIndex + 1, index],
[rowIndex + 1, index + 1]
];
return Array.from(new Set(partIndex.filter(([rowNum, col]) => rowNum >= 0 && rowNum < this.schematic.length && col >= 0 && col < row.length)
.map(([rowNum, column]) => {
return this.parts[rowNum][column]
}).filter(Boolean)))
.reduce((total, val) => total + val,0);
})
) as number[];
return partsList.reduce((total, partNumber) => total + partNumber, 0);
}
```
But whatever, it works.
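One helper this leans on, `matchAllAndThen`, isn't shown above; a minimal version consistent with how it's called might be:
```typescript
// Hypothetical reconstruction: call fn with each match's text and start index.
const matchAllAndThen = (line: string, regex: RegExp, fn: (match: string, index: number) => void): void => {
  for (const m of line.matchAll(regex)) {
    fn(m[0], m.index!);
  }
};
```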
## Part Two
This was easier - now we just need to find `*` symbols with exactly two adjacent numbers. I added a flag to my `create` function that only matched `*` symbols, and then filtered the list of part numbers to ones where `length === 2`. Finally, I reduced the list down and summed the "ratios":
```javascript
public gearRatioSums(): number {
return this.schematic.flatMap((row, rowIndex) =>
row
.map((symbol, index) => {
const partIndex = [
[rowIndex - 1, index - 1],
[rowIndex - 1, index],
[rowIndex - 1, index + 1],
[rowIndex, index - 1],
[rowIndex, index + 1],
[rowIndex + 1, index - 1],
[rowIndex + 1, index],
[rowIndex + 1, index + 1]
];
return Array.from(new Set(partIndex.filter(([rowNum, col]) => rowNum >= 0 && rowNum < this.schematic.length && col >= 0 && col < row.length)
.map(([rowNum, column]) => {
return this.parts[rowNum][column]
}).filter(Boolean)))
})
).filter(list => list.length === 2)
.reduce((total, [a, b]) => {
return total + (a * b)
}, 0);
}
```
Today took me _way_ longer than I'd have expected. Feels like this year is surprisingly hard 😅

View File

@ -0,0 +1,137 @@
---
title: "Advent of Code 2023: Day Two"
date: 2023-12-02T09:30:06
slug: advent-of-code-2023-day-two
tags: [advent-of-code-2023]
---
On to Day Two of Advent of Code. As before, this post contains spoilers for the solution. You can follow all of the Advent of Code posts using the [Advent of Code tag](https://lewisdale.dev/post/tag/advent-of-code-2023/), and the code is available on [Git](https://git.lewisdale.dev/lewis/advent-of-code-2023).
## A bit of extra setup
Yesterday I was rushing, because it was my second language-change of the morning (PICO-8 -> Rust -> Typescript), so I didn't bother setting up a test framework. But today I've got a bit more time, so I made sure to add `ts-jest` to the project, so I can actually write "proper" tests.
I haven't gone back to Day One and added the tests. I have no idea if I will or not - I might just leave it as-is for posterity.
## Part One
We're given a set of strings representing games, where each game has a series of rounds in which cubes are pulled from a bag. The data looked like this: `Game 1: 2 red, 5 blue; 6 red, 11 green, 2 blue; 2 red, 4 green`.
The task is to calculate which game IDs would be possible given certain limits (in this case, available cubes). The limits are: `green: 13, red: 12, blue: 14`.
My solution was pretty simple: I parsed each game into an object shaped like this:
```
{
id: number;
blue: number;
red: number;
green: number;
}
```
I constructed the object:
```typescript
const parseGame = (input: string): Game => {
const [gameId, rounds] = input.split(':');
const id = parseInt(gameId.split(' ')[1], 10);
const game = rounds.split(';').reduce((game, round) => {
const colors = round.split(',').map(color => color.trim());
colors.forEach(color => {
const [count, colorName] = color.split(' ');
game[colorName as 'blue' | 'green' | 'red'] += parseInt(count, 10);
});
return game;
}, {
blue: 0,
red: 0,
green: 0
});
return {
id,
...game
}
}
```
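Running the example line from above through it (a quick check) sums the colours across every round:
```typescript
console.log(parseGame("Game 1: 2 red, 5 blue; 6 red, 11 green, 2 blue; 2 red, 4 green"));
// -> { id: 1, blue: 7, red: 10, green: 15 }
```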
And then filtered out games that had cubes beyond the limits, and summed the ids:
```typescript
const sumPossibleGames = (games: Game[], limits: Cubes): number => {
const possibleGames = games.filter(game => {
return Object.keys(limits).every(color => {
return game[color as keyof Cubes] <= limits[color as keyof Cubes];
});
});
return possibleGames.reduce((total, game) => total + game.id, 0);
}
```
And that worked! On to part two.
## Part Two
Okay now we have to work out the minimum required cubes for each game, multiply them together, and then calculate the sum of that for every game.
Which is where I realised my data model was wrong. I needed to change it so that each game held the individual rounds.
So now, my model is:
```typescript
type Cubes = {
blue: number;
red: number;
green: number;
}
type Game = {
id: number;
rounds: Cubes[];
}
```
And my parsing is slightly different:
```typescript
const parseGame = (input: string): Game => {
const [gameId, roundsInput] = input.split(':');
const id = parseInt(gameId.split(' ')[1], 10);
const rounds = roundsInput.split(';').map(round => {
return round.trim().split(',').map(cube => cube.trim()).reduce((total, cube) => {
const [amount, color] = cube.split(' ');
total[color as keyof Cubes] = parseInt(amount, 10);
return total;
}, {blue: 0, green: 0, red: 0} as Cubes);
});
return {
id,
rounds
}
}
```
But now I'm able to calculate the results and move on with my day:
```typescript
const minimumCubesRequiredForGame = (game: Game): Cubes => {
return game.rounds.reduce((total, round) => {
Object.keys(round).forEach(color => {
total[color as keyof Cubes] = Math.max(total[color as keyof Cubes], round[color as keyof Cubes]);
});
return total;
}, {blue: 0, green: 0, red: 0} as Cubes);
}
const calculateMinimumCubePowers = (games: Game[]): number => {
const minimumCubes = games.map(minimumCubesRequiredForGame);
return minimumCubes.reduce((total, cubes) => {
return total + (cubes.blue * cubes.green * cubes.red);
}, 0);
}
```
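Checking against the example game from Part One (a quick sketch): the minimums are 6 red, 11 green, and 5 blue, giving a power of 330.
```typescript
const game = parseGame("Game 1: 2 red, 5 blue; 6 red, 11 green, 2 blue; 2 red, 4 green");
console.log(minimumCubesRequiredForGame(game)); // { blue: 5, green: 11, red: 6 }
console.log(calculateMinimumCubePowers([game])); // 5 * 11 * 6 = 330
```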
And that was enough to complete Day Two. I'm glad it was much simpler than Day One at least 😅

View File

@ -0,0 +1,15 @@
---
title: "How to tell if your product needs a &#8220;Wrapped&#8221; feature"
date: 2023-12-21T18:28:29
slug: how-to-tell-if-your-product-needs-a-wrapped-feature
---
It's December, and that means every product has launched their Wrapped slideshows, which give you a little breakdown of how your usage compares to all the other users of any given product. I especially appreciated my banking app telling me just how many times I'd been to McDonald's this year.
In case you're wondering if your product should implement such a feature, I've put together a handy flow chart to help you decide:
![Needlessly snarky flowchart with the title "Does my product need a "wrapped" feature?". There is only one decision point, "Is My Product Spotify?" - the "Yes" flow goes to a point saying "Probably Not", and "No" leads to "Absolutely Not"](./src/images/spotify-flow.png)
Hope this helps!

View File

@ -0,0 +1,17 @@
---
title: "Bicycle woes"
date: 2023-02-25T09:22:58
slug: bicycle-woes
---
I've been loving my bike ever since it was gifted to me: a black Specialized Allez on Fulcrum Racing 5 wheels. It goes like shit off a hot shovel, because it's so lightweight (I think it clocks in at about 8kg unladen).
But the problem with being an amateur cyclist who's never been particularly skilled at mechanics is that it was too easy to put off basic maintenance tasks until they were a bit too late.
Last week, I decided I'd replace the chain - easy enough, even a newbie like me can do it. But the problem was that I'd left it too long and the worn chain had then worn down the cassette (the set of cogs on the rear wheel). The wear is inevitable, especially with the mileage I do; by my estimate I cover a little over 4000 miles/6400km a year.
Then when changing the cassette, the shop I took it to mentioned that there was some wear on the bearings - the bits that help the wheel spin smoothly and stay true (straight). So I went to get them serviced, only to then be told that the whole hub on the rear is seized. So I'd either have to pay to get it all replaced (££££), or buy a new wheel (also ££££).
So, what should have been a quick-and-easy job to replace the chain has turned into an entirely new rear wheel and cassette, and cost me in the vicinity of £200. Wear on these parts is normal, and replacing them _is_ inevitable, but I could have extended the life of these parts and saved a lot of money if I'd kept up with maintenance.
I've picked up a copy of [Zinn and the Art of Road Bike Maintenance](https://www.bigandtallbike.com/zinn-the-art-of-road-bike-maintenance-5th-edition.html), and am going to commit to a minor maintenance routine (clean and frequent lubrication), with some bigger replacements more frequently. Hopefully this will save me money in the long run, and I'll get better at bike repairs in general.

View File

@ -0,0 +1,16 @@
---
title: "Signing up for my first cycling event"
date: 2023-02-06T08:00:48
slug: signing-up-for-my-first-cycling-event
---
I registered for my first ever cycling event over the weekend. I was thinking about a duathlon/triathlon, but as I'm not much of a runner or swimmer I thought purely cycling would be the best thing for me. So I registered for the [Salford - Blackpool 100k](http://www.bike-events.co.uk/Ride.aspx?id=2721&n=y).
I'll be doing it to raise money for [The Christie](https://www.christie.nhs.uk/about-us), a cancer research & care trust based in Manchester. I'm waiting on my fundraising pack, but once it's arrived I'll post a JustGiving page; any and all donations will be greatly appreciated!
I'm both nervous and excited, I've never done a distance like this before - I've done 50k with no stops, but that was mostly because I got lost. I'll need to start training pretty much now, but it should be good fun! Hoping I'll enjoy it enough to do a few more in the following year-or-so.
**Update**: I'm collecting donations for my bike ride on [JustGiving](https://www.justgiving.com/fundraising/lewis-dale-bikes)

View File

@ -0,0 +1,133 @@
---
title: "Using WebC for progressively-enhanced UI elements"
date: 2023-02-23T10:17:01
slug: using-webc-for-progressively-enhanced-ui-elements
---
Now I'm back in Eleventy-land, I thought I'd give [WebC](https://github.com/11ty/webc) a go. For those unaware, it's a templating language that generates Web Components, complete with asset bundling.
But unlike regular Web Components, you can build things that aren't completely reliant on Javascript. Because WebC is server-side rendered initially, you can provide fallback elements that will still render if Javascript fails or is disabled.
To try it out, I created a simple live Markdown editor. It's simply a text area that lets you input Markdown, and then live-renders it to the side. I've got a new 11ty project, where every file is using `webc` extensions, and global components are configured. I've created a new file, `_components/rich-textarea.webc`.
To start with, I'll add my markup, which is just a textarea and a div for showing the output.
```html
<template webc:root>
<textarea :id="this.uid" :name="name" @raw="content"></textarea>
<div class="rich-text-root" aria-live="polite">
</div>
</template>
```
By giving the `webc:root` attribute to the `template`, WebC will strip out the `template` tags when it renders, leaving me with just the textarea and div inside my Web Component tag.
Now in my `index.webc` I can use it:
```html
<main>
<rich-textarea name="my-name" content="# This is some raw content"></rich-textarea>
</main>
```
And that renders, albeit a bit uninspiring:
![A textarea input on a white background. The input says "# This is some raw content".](./src/images/Screenshot-2023-02-23-at-09.32.50-e1677144917908-1024x211.png)
## Styling
Next up, I want to add some styles to my component. I can include these directly in my `webc` file and they'll be bundled at build time:
```html
<style webc:scoped>
:host {
display: flex;
flex: 1;
width: 100%;
align-items: stretch;
justify-items: stretch;
}
:host textarea {
background: white;
box-sizing: border-box;
flex: 1;
font-size: 2rem;
padding: 0.5rem;
resize: none;
}
:host:not(:defined) .rich-text-root {
display: none;
}
:host .rich-text-root {
flex: 1;
padding: 0.5rem;
}
</style>
```
The interesting parts are the `:host` and the `:defined` pseudoclasses. `:host` refers to the webcomponent itself, I've cheated a bit and used it to give some flex styling to help make things fullscreen, but I'm not sure if that's best practice or not.
The `:defined` pseudoclass tells us if the Web Component has been defined or not - with Javascript disabled, this will always be false, and so we want to hide the render area when that's the case. It's also false right now because we haven't added any Javascript.
So, with this CSS, we now get a full-width textarea:
![A large textarea on a white background, containing the text "# This is some raw content"](./src/images/Screenshot-2023-02-23-at-09.41.32-1024x525.png)
## Adding interactivity
This is the final bit! Now, all we need is a bit of Javascript to make things interactive. We can add a `script` tag to our component, and in that tag use `window.customElements.define` to define our Web Component:
```html
<script>
window.customElements.define("rich-textarea", class extends HTMLElement {
connectedCallback() {
this.renderer = new markdownit();
this.content = this.attributes.content.value;
this.textarea = this.querySelector('textarea');
this.root = this.querySelector(':scope > .rich-text-root');
this.textarea.addEventListener('change', (e) => this.update(e));
this.textarea.addEventListener('keyup', (e) => this.update(e));
this.root.innerHTML = this.renderer.render(this.content);
}
update(e) {
this.content = this.textarea.value;
this.root.innerHTML = this.renderer.render(this.content);
}
});
</script>
```
So, when the Javascript has loaded and `connectedCallback` runs, we're getting the content we've passed as a prop, and using the [MarkdownIt](https://github.com/markdown-it/markdown-it) library to transform it to HTML and render it within our `rich-text-root`.
Then, whenever the user triggers a `change` or `keyup` event on the textarea, we update that content again, giving us a live reload.
The nice part about this is that because we're using WebC, the markup it generates already includes a fallback:
```html
<!-- _site/index.html -->
<rich-textarea
name="my-name"
content="# This is some raw content"
class="wa1k3zmq0"
>
<textarea id="webc-hf3zp" name="my-name">
# This is some raw content
</textarea>
<div class="rich-text-root" aria-live="polite"></div>
</rich-textarea>
```
That means that, if Javascript is enabled then I get the full live-edit functionality:
![A page with a textarea on one half of the page, and the rendered output on the other.](./src/images/Screenshot-2023-02-23-at-10.10.19-1024x523.png)
And when we disable Javascript, we just get the textarea on its own as a fallback:
![The same textarea, now taking up the full page size](./src/images/Screenshot-2023-02-23-at-10.15.35-1024x525.png)

View File

@ -0,0 +1,50 @@
---
title: "Using WordPress as a Markdown editor"
date: 2023-02-18T21:56:35
slug: using-wordpress-as-a-markdown-editor
---
The eagle-eyed among you will notice that my website's had a slight refresh - and by that I mean I got bored of that ZX Spectrum theme roughly 45 seconds after publishing it.
I've also switched back to Eleventy! I'm still using Wordpress though, because I didn't want to migrate. I did, however, want to make it _easier_ to migrate in the future. I've also got a nice Markdown configuration that I'm quite comfortable with, so I'd like to use that too.
So, I need to somehow use Wordpress as a markdown editor... sounds silly, probably is, but I'm gonna do it anyway.
I already had the [WP Githuber MD](https://github.com/terrylinooo/githuber-md) extension installed from when I originally migrated to Wordpress. It's quite handy because it comes with a utility that converts between Wordpress posts and Markdown. It achieves this by storing the unrendered Markdown content alongside posts.
That solves the first half of the problem - I just needed to go through and convert all my posts, which was a pretty quick job.
Next, I need to be able to _get_ that Markdown content. The [Wordpress REST API](https://developer.wordpress.org/rest-api/) obviously doesn't return this by default, and the plugin doesn't extend the API for us. So, I'll need to write a little plugin myself that uses `register_rest_field` to add the markdown content to the post:
```php
add_action( 'rest_api_init', 'add_md_to_rest');
function add_md_to_rest() {
register_rest_field( 'post', 'markdown', array(
'get_callback' => function( $post_arr ) {
return html_entity_decode(get_post_field('post_content_filtered', $post_arr['id'], 'edit'), ENT_QUOTES | ENT_HTML5, 'UTF-8');
},
'update_callback' => function( $karma, $comment_obj ) {
return true;
},
'schema' => array(
'description' => __( 'Markdown content.' ),
'type' => 'string'
),
) );
}
```
Now, when I fetch new posts from my Wordpress API, they have a `markdown` field, containing the unrendered markdown.
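On the Eleventy side, the field comes back on WordPress's standard posts endpoint (a sketch; `example.com` stands in for the real CMS domain):
```typescript
const response = await fetch("https://example.com/wp-json/wp/v2/posts");
const posts = await response.json();
console.log(posts[0].markdown); // the unrendered markdown added by the plugin above
```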
Then, finally, in my template I have a filter that directly calls `markdownIt.render` on a passed string, and I use that to render the content of my post:
```javascript
eleventyConfig.addFilter('md', content => content ? markdownLib.render(content) : "");
```
{% raw %}
```twig
{{ post.markdown | md | safe }}
```
{% endraw %}

View File

@ -0,0 +1,11 @@
---
title: "Well, that was short-lived"
date: 2023-02-03T09:09:23
slug: well-that-was-short-lived
---
I only just started allowing this site to cross-post to Twitter, for the sake of syndication. But, given that Twitter have decided to close free API access, I can't imagine that [Bridgy](https://brid.gy) will be able to continue sharing to it.
This change doesn't hugely impact me, by-and-large I don't use Twitter anyway, especially after they stopped third-party clients working (RIP Tweetbot, you'll be missed). I've seen quite a few indie developers who rely on their Twitter apps for their livelihood though. Dependent on the pricing, it's likely that a lot of these people will lose their income. Once again, the Twitter CEO proves that he has no idea what made the platform work in the first place.
Anyway, that was nice while it lasted (well, it wasn't really but hey). As always, I'm on [Mastodon](https://dapchat.online/@lewisdaleuk), which is where I actually post anyway. My Twitter account will probably go back to being (largely) dormant.

View File

@ -0,0 +1,56 @@
---
title: "Bringing my omg.lol Now page into Eleventy"
date: 2023-03-07T09:52:55
slug: bringing-my-omg-lol-now-page-into-eleventy
---
[Robb Knight](https://rknight.me) has [this great Javascript tool](https://omgnow.rknight.me/) for embedding your [omg.lol](https://omg.lol) /now page in another page.
I thought it was pretty cool to use, but because I'm allergic to client-side Javascript, I wanted to port it to Eleventy so that I could generate it once-per-build. It was actually pretty simple to do, because the [server-side source for omgnow.js is on Github](https://github.com/rknightuk/omgnow.js). So this was basically a port-to-JS job.
I have two files, `now.md` and `now.11tydata.js`. My `now.md` is pretty simple, it just looks like this:
```markdown
---
title: My /now page
layout: page.njk
---
{{ content | safe }}
```
And then `now.11tydata.js` uses `eleventyComputed` values to retrieve and parse the `/now` page content from the [omg.lol API](https://api.omg.lol/):
```javascript
module.exports = {
eleventyComputed: {
content: async () => {
// Retrieve the /now page from the server
const response = await fetch("https://api.omg.lol/address/lewis/now");
const body = await response.json();
// Convert the unix timestamp to a JS datetime timestamp
const updated = new Date(body.response.now.updated * 1000);
let content = body.response.now.content;
// Replace the last-updated tag
content = content.replace("{last-updated}", `<span class="now_updated">Updated ${updated.toLocaleDateString('en-GB', { dateStyle: "medium" })}</span>`);
// Strip out omg.lol-specific tags
content = content.replaceAll(/{[^}]*}/g, "");
// remove comments
content = content.replaceAll(/\/\*.*?\*\//g, "");
// Remove everything before the --- Now --- marker, because I handle page titles and headings in 11ty
if (content.includes("--- Now ---")) {
const [before, after] = content.split("--- Now ---");
content = after;
}
return content;
}
}
}
```
And there you have it! Robb's source made this 1000x easier than it would have been. The only thing I need to do is stop stripping out the omg.lol icon sets, and instead replace them with the icons I actually have - my markdown config mostly duplicates the same fontawesome set.

View File

@ -0,0 +1,11 @@
---
title: "Hunting for inspiration"
date: 2023-03-06T20:37:34
slug: hunting-for-inspiration
---
I feel like my posting on here has slowed down a lot in the last month. In part that's because I've been a lot busier at work, but I've also found myself not having _anything_ to write about.
Well, that's not true. There are 3-or-4 posts lingering in my drafts right now, but I've not finished them because the topics don't interest me enough to really want to sit down and write about them.
I'll be glad when my next silly side project comes along and I can write a few posts about that. I think I'm happiest when I've got something to focus on that's not just my job: programming started as a hobby and I do like that it (usually) remains one.

View File

@ -0,0 +1,15 @@
---
title: "I don&#8217;t care about ChatGPT"
date: 2023-03-31T09:43:19
slug: i-dont-care-about-chatgpt
---
I'm starting to feel like I'm in the minority here, because so many people are talking about it, but I genuinely don't have any interest in GPT or any of its derivatives.
I get that it looks like magic to a lot of people, and almost feels like actually _intelligent_ artificial intelligence, but I get the distinct impression it's just a façade. Scratch below the surface and you'll find yet another deeply-flawed technical product.
There are plenty of reasons to be worried about it, most of them to do with the ethics of a model trained on the internet, and people's predisposition to leaning heavily on this as a tool. I worry that it's going to be - or already is being - used to make decisions that impact people.
But ultimately it doesn't _excite_ me the way it excites other people. I've found a couple of little uses for it, like generating some bedtime stories for my kid, but nothing that revolutionises my life.
Ultimately I think this is going to be the Next Big Thing that eventually turns out to have not been that big of a thing at all; the world will move on once the tech bros start banging on about something else shiny.

View File

@ -0,0 +1,13 @@
---
title: "Server migration"
date: 2023-03-12T09:13:52
slug: server-migration
---
It took me a fair while, but I've finished migrating my VPS off Linode now - finally. I was spending ~£20/month on servers, and that's set to increase by another 20% this month. I've now gone for [Contabo](https://contabo.com), who are pretty reputable and were offering a really good package - 4-CPU, 8GB RAM, 400GB SSD VPS for roughly £9 a month, once VAT is factored in. So now I've got more resources, for less than half the price.
Migration took longer than I thought it would - as these things tend to - but that's mostly because I used it as an opportunity to also fix a few bits of tech debt I'd built up through hamfisted attempts at tacking features on. Naturally, this meant doing a lot of things by hand (and this time remembering to write down what I did for next time).
Now my frontend & CMS are on separate domains, rather than co-located in the same directory. It makes for a cleaner split, and will make it easier for me to move away from Wordpress at some point.
Next up is to also move my Mastodon instance onto this server, and then decommission that one, after which I won't need to have anything on Linode at all, and can safely delete that account.

View File

@ -0,0 +1,19 @@
---
title: "The Mini F9 Camera is everywhere (and it&#8217;s incredibly bad)"
date: 2023-03-04T19:39:13
slug: the-mini-f9-camera-is-everywhere-and-its-incredibly-bad
---
This is another post about cycling, but my parents bought me a helmet-mounted camera to wear when I cycle. It's kind-of a necessity, particularly when commuting, because motorists are lunatics and cycling infrastructure is inadequate in almost every part of the UK.
Unfortunately, they were duped into buying a [Mini F9](https://www.amazon.co.uk/Portable-Waterproof-Suitable-Bicycles-Motorcycles/dp/B08HWQ5VC3). It's an extremely common bullet-style camera that's almost ubiquitous on Amazon. It's been around since roughly 2014, from what I can tell, and purports to record sound, be waterproof, and support 1080p recording both during the day and at night.
Oh, and it sucks.
Because we're just coming out of winter, most of my cycling has been in the dark. And it's been almost completely useless. Several times I've gone to retrieve footage, only to find that the camera hasn't picked up license plates, or even the audio when I've read them out to make sure they're captured. Even in the daytime, the footage isn't great. It might be 1080p, but the sensor looks like it was taken from a 2005 flip phone, the footage is so grainy.
Add to that a battery life of around 2h30m and a maximum microSD card size of 32GB, and I often can't guarantee that I've even still _got_ footage when I get home - it loops over older videos when the card is full, which is about 2 hours' worth of footage at 1080p.
I'm especially annoyed because I found out that the website my parents bought it from had marked it up considerably, and then mysteriously vanished once the Christmas season had passed. The price seems to vary massively - some vendors sell it for around £25, but I've seen it listed at £60 at times.
Anyway, this was a rant/warning for anyone on the lookout for a helmet camera: the Mini F9 is very bad. I've just picked up a [Drift Ghost XL](https://driftinnovation.com/products/ghost-xl), although I haven't had a chance to test it out yet. I'm hoping the image quality is much better - it also supports livestreaming and has a ~9hr battery life, which should be really good for when I do my [100km charity ride](https://justgiving.com/fundraising/lewis-dale-bikes).

View File

@ -0,0 +1,29 @@
---
title: "TIL: Recovering from an accidental force push in git"
date: 2023-03-17T08:07:08
slug: til-recovering-from-an-accidental-force-push-in-git
---
I don't normally force-push to repos, but I wanted to overwrite the contents of an old project with a new one of the same name. So last night, I created my new project, set the git origin url to the correct one (`git remote set-url origin git@...`), and then ran `git push -u origin main --force`. After that, I switched off my laptop and went to bed.
Then, this morning, I switch on my other machine and pull the project I'm working on aaaand... everything disappears. Oops. I wasn't thinking and set the wrong url in the repo, and force-pushed to the wrong repository. Luckily it was fairly easy to recover from this, and nothing there goes to prod so there were no services with downtime.
## Getting back to the correct commit
My first instinct was to check `git log`, but because `git push --force` overrides the log, all I saw was the classic "initial commit" message from my new project. However, you can still get the full commit history using [reflog](https://git-scm.com/docs/git-reflog).
Running `git reflog show` gave me a full log of every commit, even the ones from before the force-push. Just after the two commits I'd accidentally pulled (one with the new project's initial commit, one rebasing it onto my local repo), I could see the last real commit I made on the project. The entry looked a bit like this:
![51f1269 HEAD@{4}: commit: Store authors alongside webmention](./src/images/Screenshot-2023-03-17-at-08.01.48.png)
So, running `git checkout HEAD@{n}` where `n` was the number in the curly braces let me checkout my original changes in a detached state. Great, all was not lost!
## Pushing back to main
I didn't really care about the changes I currently had on the repo from the new project - it's just boilerplate setup, and it's still on my other machine anyway. So, to move my currently detached state back to the repo `HEAD`, I doubled down on my mistake and did a second force push: `git push origin HEAD:main --force`.
And that worked! My git commit history is back, the files are visible on the remote repository, all is not lost.
## Lesson learned?
Eh, maybe? I wouldn't use `push --force` on anything that: a) wasn't owned by me, b) was deploying straight to production/was actually in use. This was just me being overtired and too lazy to delete & remake a repository. I'd probably do it again, which is why I'm writing this.

View File

@ -0,0 +1,72 @@
---
title: "TIL: resizing images on-the-fly with nginx"
date: 2023-03-22T09:46:10
slug: til-resizing-images-on-the-fly-with-nginx
---
Because I've started using Wordpress as a Markdown backend for 11ty, the 11ty image plugin no longer works, which is a bummer. So for a while I've been serving images at their default resolution, which is to say: too big.
As it turns out, there's an nginx module, [image_filter](https://nginx.org/en/docs/http/ngx_http_image_filter_module.html), that can be used to resize images per request. I'm using Ubuntu, and the module doesn't exist in the standard repositories, so I followed [nginx's guide to adding their repositories](http://nginx.org/en/linux_packages.html#instructions).
After that, I ran `sudo apt update && sudo apt install nginx-module-image-filter`, and then added the following line to my nginx conf file (`/etc/nginx/nginx.conf`):
```nginx
load_module modules/ngx_http_image_filter_module.so;
```
## Serving resized images
As a baseline, I can serve resized images by just adding the `image_filter` directive to a location that matches images, e.g.:
```nginx
location ~* ^(/.+)\.(jpg|jpeg|jpe|png|gif)$ {
add_header Vary Accept;
image_filter resize 1000 -;
expires 30d;
}
```
This works, but it increases server load a fair bit, because it's now resizing every image on every single request.
## Caching the requests
So, now I need to cache my requests. I can add a separate server that I use `proxy_pass` to call. This server will serve resized images, and then I can use `proxy_cache` to cache the responses for however long I choose.
```nginx
server {
server_name localhost;
listen 8888;
location ~* ^(/.+)\.(jpg|jpeg|jpe|png|gif)$ {
root /path/to/my/images;
image_filter resize 1000 -;
}
}
proxy_cache_path /tmp/images-cache/ levels=1:2 keys_zone=images:10m inactive=24h max_size=100m;
# main server block
server {
listen 80;
location ~* ^(/.+)\.(jpg|jpeg|jpe|png|gif)$ {
add_header Vary Accept;
proxy_pass http://localhost:8888$uri;
proxy_cache images;
proxy_cache_valid 200 30d;
expires 30d;
}
location /wp-content/uploads {
# You need to explicitly define DNS resolution when using
# variables in the proxy_pass directive. This trick resolves that.
proxy_pass http://localhost:8888/;
}
}
```
So now what's happening is that any call to an image file is being proxied to a new backend running on port 8888, and caching the responses. The backend is the part responsible for serving resized images. I've put a 30-day cache on the images, but to be honest it can probably be longer because I very rarely update these.
## Next steps
This is great, and has really reduced the size of the images I'm serving, but next I'd like to have it serve webp versions of images if they exist - but that will require preprocessing the images and then attempting to serve them.

View File

@ -0,0 +1,37 @@
---
title: "Visiting the Northwest Computer Museum"
date: 2023-03-18T15:39:06
slug: visiting-the-northwest-computer-museum
---
I got the chance to visit the [Northwest Computer Museum](https://nwcomputermuseum.org.uk/) today, which was really cool. I'm a bit of a fan of older computers, I think there's something charming about them. So when I found out this was opening a few weeks ago I knew I had to go.
I'm really glad I did - they had an amazing collection, and you're free to try out and play with all of them, with a few exceptions. They had the usual suspects: Commodore 64, every variant of ZX Spectrum that Sir Clive could muster up, a few different Tandys, the Commodore PET.
![A bank of dozens of vintage computers, all switched on and hooked up to screens](./src/images/museum_wide_shot.jpg)
But then there were some really standout machines. They had an original Apple Lisa, that had been donated to the museum and was undergoing refurbishment. They also had an Apple 1 behind a cabinet. Well, sort of. The board wasn't original, it was a clone board, but the casing and keyboard were all from an original Apple 1.
![An Apple 1, in chestnut-coloured wooden casing, behind a glass cabinet](./src/images/Apple_1.jpg)
They had pretty much every class of computer from the last 5 decades, including a bank of video games consoles, and a VR suite. They even had a Pebble watch, still in its packaging (which I'd been getting nostalgic about just the night before).
Just off from the main museum is a room filled with BBC Micro computers, where the museum staff hold classes on maintaining & repairing electronics, as well as teaching programming (both BBC BASIC and modern programming languages). There's even an internet café for people who need regular computer access.
<figure>
![A Radio Shack Tandy TRS-80](./src/images/WhatsApp-Image-2023-03-18-at-15.18.53.jpeg)
<figcaption>A Radio Shack Tandy TRS-80</figcaption>
</figure>
<figure>
![A Commodore 64 with 1942 loaded](./src/images/WhatsApp-Image-2023-03-18-at-15.18.57.jpeg)
<figcaption>A Commodore 64 playing 1942</figcaption>
</figure>
<figure>
![A CRT monitor with Back To The Future 2 on the screen](./src/images/back-to-the-future-2.jpeg)
<figcaption>Playing Back To The Future 2 on a Commodore</figcaption>
</figure>
Overall this was a great day out, and I'd highly recommend anyone in the area go and have a look - it's a brilliant spot and the owner is very passionate about the project.

View File

@ -0,0 +1,25 @@
---
title: "Quick snippet: Detect who pays for Twitter"
date: 2023-04-03T13:43:30
slug: quick-snippet-detect-who-pays-for-twitter
---
Even though Twitter may have tried to disguise who pays for Blue, and who has a legacy verified account, they left the `ld-json` fields intact.
Here's a quick script you can use in an Arc boost to check for the field and colour the checkmark red if they pay:
```javascript
setTimeout(() => {
const ldJson = document.querySelector('[type="application/ld+json"]')
const icons = document.querySelectorAll('[data-testid="icon-verified"]')
const profile = JSON.parse(ldJson.textContent);
if (icons && profile.author.disambiguatingDescription !== "verified") {
icons.forEach(i => i.style.color = 'red')
}
}, 1000)
```
I don't imagine it'll work for too long, but it's a stopgap until they remove legacy ticks completely and you can just block Blue subscribers on sight.
**Edit:** Well, this just became a lot easier I suppose

View File

@ -0,0 +1,15 @@
---
title: "I&#8217;m still here"
date: 2023-05-08T12:31:07
slug: im-still-here
---
Just a very quick post to say this blog isn't dead! It's been a while since my last post, but I haven't really had time to do much blog writing recently.
I've been feeling a bit burned-out with tech recently, and just haven't been spending much of my free time working on things.
Instead, as anyone who [follows me on Mastodon](https://dapchat.online/@lewisdaleuk) can probably attest to, I've been spending a lot of time working on my bikes. Having a hobby that doesn't require me to be sat in front of a screen has been really nice, and the physical aspect of it is pretty cathartic.
I've written a few posts on [my omg.lol weblog](https://lewis.weblog.lol) about my efforts to refurbish a 1983 Peugeot bike I picked up a couple of months ago, if that's of any interest. I might also repost them here, for posterity.
Anyway, this was a lot of words to say that I'm still about, I'm just taking a wee break from purely tech-focused pursuits for a while.

Some files were not shown because too many files have changed in this diff Show More