Add breadcrumb and clean up docs loader
All checks were successful
Deploy App / docker (ubuntu-latest) (push) Successful in 2m8s

This commit is contained in:
Lee
2024-04-20 22:16:30 +01:00
parent 325fe62569
commit f7a3bb00a5
6 changed files with 244 additions and 94 deletions

View File

@ -1,99 +1,147 @@
/* eslint-disable */
import * as fs from "node:fs";
import path from "node:path";
// @ts-ignore
import read from "read-file";
type Metadata = {
const docsDir = path.join(process.cwd(), "documentation");
/**
* Metadata for documentation content.
*/
type DocsContentMetadata = MDXMetadata & {
/**
* The title of the documentation page.
* The title of this content.
*/
title: string;
/**
* The description of the documentation page.
* The date this content was published.
*/
description: string;
published: string;
/**
* The summary of this content.
*/
summary: string;
};
/**
* The directory where the documentation files are stored.
* Metadata for an MDX file.
*/
const documentationDirectory = path.join(process.cwd(), "documentation");
/**
 * Metadata common to every parsed MDX file.
 */
type MDXMetadata = {
    /** Slug of the file, assigned after the file has been read. */
    slug: string;

    /**
     * Raw key/value pairs parsed from the frontmatter.
     * NOTE(review): parseMetadata spreads the parsed keys at the TOP level
     * of its result and never sets a nested "metadata" object — the
     * "as T" cast there masks this mismatch; confirm the intended shape.
     */
    metadata: Record<string, string>;

    /** Markdown body with the frontmatter block stripped. */
    content: string;
};
/**
* Gets all the documentation files recursively.
*
* @param dirPath the directory path to search for documentation files.
* The regex to match for metadata.
*/
function getDocumentationFiles(dirPath: string): string[] {
let files: string[] = [];
const items = fs.readdirSync(dirPath);
const METADATA_REGEX: RegExp = /---\s*([\s\S]*?)\s*---/;
items.forEach(item => {
const itemPath = path.join(dirPath, item);
/**
* Get the directories in the
* given directory.
*/
export function getDocsDirectories(dir: string) {
const dirs: string[] = [dir];
const paths = fs.readdirSync(dir);
for (const item of paths) {
const itemPath = path.join(dir, item);
const stat = fs.statSync(itemPath);
if (stat.isDirectory()) {
// Recursively traverse directories
files.push(...getDocumentationFiles(itemPath));
} else if (stat.isFile() && path.extname(item) === ".md") {
// Collect markdown files
files.push(itemPath);
dirs.push(...getDocsDirectories(itemPath));
}
});
return files;
}
return dirs;
}
/**
 * Collect the metadata for every documentation page
 * found anywhere under the docs directory.
 */
export function getDocsContent() {
    return getDocsDirectories(docsDir).flatMap(directory =>
        getMetadata<DocsContentMetadata>(directory)
    );
}
/**
 * Find a single documentation page by its slug.
 *
 * @param path optional slug segments; when omitted the
 *             "landing" page is looked up
 * @returns the matching page, or undefined when none exists
 */
export function getDocContent(path?: string[]) {
    // NOTE(review): this parameter shadows the imported "path" module;
    // harmless here because the module is not used in this function.
    const slug = path?.join("/") ?? "landing";
    for (const doc of getDocsContent()) {
        if (doc.slug === slug) {
            return doc;
        }
    }
    return undefined;
}
/**
* Get the metadata of mdx
* files in the given directory.
*
* @param file the file to get the content of.
* @param directory the directory to search
*/
function getDocumentationFileContent(file: string) {
return read.sync(file, "utf8");
}
/**
* Gets all the documentation pages.
*/
export function getDocumentation() {
const files = getDocumentationFiles(documentationDirectory);
return files.map(file => {
const { metadata, content } = parseFrontmatter(getDocumentationFileContent(file));
let slug = path.relative(documentationDirectory, file).replace(/\.(md)$/, "");
slug = slug.replace(/\\/g, "/"); // Normalize path separators
export function getMetadata<T extends MDXMetadata>(directory: string): T[] {
const files: string[] = fs.readdirSync(directory).filter((file: string): boolean => {
const extension: string = path.extname(file); // The file extension
return extension === ".md" || extension === ".mdx";
}); // Read the MDX files
return files.map((file: string): T => {
const filePath: string = path.join(directory, file); // The path of the file
return {
metadata,
content,
slug,
};
...parseMetadata<T>(fs.readFileSync(filePath, "utf-8")),
slug: filePath
.replace(docsDir, "")
.replace(/\.mdx?$/, "")
.replace(/\\/g, "/")
.substring(1),
}; // Map each file to its metadata
});
}
/**
* Parses the frontmatter of a file.
* Parse the metadata from
* the given content.
*
* @param fileContent the content of the file.
* @param content the content to parse
* @returns the metadata and content
* @template T the type of metadata
*/
function parseFrontmatter(fileContent: string) {
let frontmatterRegex = /---\s*([\s\S]*?)\s*---/;
let match = frontmatterRegex.exec(fileContent);
let frontMatterBlock = match![1];
let content = fileContent.replace(frontmatterRegex, "").trim();
let frontMatterLines = frontMatterBlock.trim().split("\n");
let metadata: Partial<Metadata> = {};
function parseMetadata<T extends MDXMetadata>(content: string): T {
const metadataBlock: string = METADATA_REGEX.exec(content)![1]; // Get the block of metadata
content = content.replace(METADATA_REGEX, "").trim(); // Remove the metadata block from the content
let metadata: Partial<{
[key: string]: string;
}> = {}; // The metadata to return
frontMatterLines.forEach(line => {
let [key, ...valueArr] = line.split(": ");
let value = valueArr.join(": ").trim();
value = value.replace(/^['"](.*)['"]$/, "$1"); // Remove quotes
metadata[key.trim() as keyof Metadata] = value;
});
// Parse the metadata block as a key-value pair
metadataBlock
.trim() // Trim any leading or trailing whitespace
.split("\n") // Get each line
.forEach((line: string): void => {
const split: string[] = line.split(": "); // Split the metadata by the colon
let value: string = split[1].trim(); // The value of the metadata
value = value.replace(/^['"](.*)['"]$/, "$1"); // Remove quotes
metadata[split[0].trim()] = value; // Add the metadata to the object
});
return { metadata: metadata as Metadata, content };
// Return the metadata and content. The initial
// slug is empty, and is defined later on.
return { ...metadata, content } as T;
}