diff --git a/packages/contented-example/docs/09-Others/02-limitations.md b/packages/contented-example/docs/09-Others/02-limitations.md
index 700cd2c5..5024a5d3 100644
--- a/packages/contented-example/docs/09-Others/02-limitations.md
+++ b/packages/contented-example/docs/09-Others/02-limitations.md
@@ -8,6 +8,6 @@ Since the purpose of Contented is to encourage authoring through a set of tools
 been for engineers to produce more markdowns (_.md) or processed prose (`_.json`). The focus is never about building a
 complete documentation website.
 
-In fact, you should take the output(s) of [`contented build`](../03-api.md) that are published into npm; and pull them
+In fact, you should take the output(s) of [`contented build`](../03-api.md#contented-build) that are published into npm; and pull them
 into your main/official website for re-presentation (`npm i, import from`). Remember, the prose/content/narrative
 created is what's valuable here. Design can easily change, but your content stands the test of time.
diff --git a/packages/contented-pipeline/src/Pipeline.ts b/packages/contented-pipeline/src/Pipeline.ts
index 585f2f03..ddeae559 100644
--- a/packages/contented-pipeline/src/Pipeline.ts
+++ b/packages/contented-pipeline/src/Pipeline.ts
@@ -97,16 +97,17 @@ export abstract class ContentedPipeline {
   protected computePath(sections: string[], parsedPath: ParsedPath) {
     const dir = `${sections.map((s) => (s !== '..' ? slugify(s) : s)).join('/')}`;
-    const file = `${slugify(this.replacePrefix(parsedPath.name))}`;
+    const file = this.computeFileName(parsedPath.name);
+    const fileFragment = this.computeFileFragment(parsedPath);
 
     if (file === 'index') {
-      return dir;
+      return `${dir}${fileFragment}`;
     }
 
     if (dir === '') {
-      return file;
+      return `${file}${fileFragment}`;
     }
 
-    return `${dir}/${file}`;
+    return `${dir}/${file}${fileFragment}`;
   }
 
   protected computeSections(parsedPath: ParsedPath) {
@@ -125,6 +126,56 @@ export abstract class ContentedPipeline {
     return path;
   }
 
+  /**
+   * Compute the file name without any fragment identifier.
+   * Use this instead of extracting the name manually.
+   * @param rawFileName The raw file name from ParsedPath
+   */
+  protected computeFileName(rawFileName: string): string {
+    if (rawFileName.includes('#')) {
+      const splitNames = rawFileName.split('#');
+
+      /**
+       * Only use the split result if it has exactly 2 parts.
+       * Title#subtitle is valid, but Title#subtitle#whatisthis is not.
+       * If it is not valid, use the name as it is.
+       */
+      if (splitNames.length === 2) {
+        return `${slugify(this.replacePrefix(splitNames[0]))}`;
+      }
+    }
+    return `${slugify(this.replacePrefix(rawFileName))}`;
+  }
+
+  /**
+   * Extract the fragment identifier from the parsed file path.
+   * Depending on whether the file extension is provided,
+   * the fragment identifier will be in the ext or the file name.
+   * @param parsedPath
+   */
+  protected computeFileFragment(parsedPath: ParsedPath): string {
+    if (parsedPath.ext !== '' && parsedPath.ext.includes('#')) {
+      const linkMatches = parsedPath.ext.match(/^(\.\w+)(#.+)*$/);
+      if (linkMatches !== null) {
+        return linkMatches[2] ?? '';
+      }
+    }
+
+    if (parsedPath.name !== '' && parsedPath.name.includes('#')) {
+      const splits = parsedPath.name.split('#');
+
+      /**
+       * Only use the split result if it has exactly 2 parts.
+       * Title#subtitle is valid, but Title#subtitle#whatisthis is not.
+       */
+      if (splits.length === 2) {
+        return `#${splits[1]}`;
+      }
+    }
+
+    return '';
+  }
+
   protected computeFileId(filePath: string) {
     return createHash('sha256').update(filePath).digest('hex');
   }
diff --git a/packages/contented-pipeline/src/Pipeline.unit.ts b/packages/contented-pipeline/src/Pipeline.unit.ts
index 1e9b6519..f2ecc8db 100644
--- a/packages/contented-pipeline/src/Pipeline.unit.ts
+++ b/packages/contented-pipeline/src/Pipeline.unit.ts
@@ -47,3 +47,40 @@ it('should replace numeric prefix path', () => {
   expect(pipeline.getSanitizedPath('(01)Header/[01]Path.md')).toStrictEqual('header/path');
   expect(pipeline.getSanitizedPath(':01:Header/[01-Path.md')).toStrictEqual('header/01-path');
 });
+
+it('should preserve fragment identifiers for files', () => {
+  const pipeline = new TestPipeline(__dirname, {
+    type: 'Type',
+    pattern: '**/*.md',
+    processor: 'md',
+  });
+
+  // With file extensions
+  expect(pipeline.getSanitizedPath('path-1.md#content1')).toStrictEqual('path-1#content1');
+
+  expect(pipeline.getSanitizedPath(':01:path.md#content1')).toStrictEqual('path#content1');
+  expect(pipeline.getSanitizedPath(':01path.md#content1')).toStrictEqual('01path#content1');
+
+  expect(pipeline.getSanitizedPath('[01]path.md#content1')).toStrictEqual('path#content1');
+  expect(pipeline.getSanitizedPath('01]path.md#content1')).toStrictEqual('01-path#content1');
+
+  expect(pipeline.getSanitizedPath('(01)path.md#content1')).toStrictEqual('path#content1');
+  expect(pipeline.getSanitizedPath('(01path.md#content1')).toStrictEqual('01path#content1');
+
+  expect(pipeline.getSanitizedPath('01-path.md#content1')).toStrictEqual('path#content1');
+  expect(pipeline.getSanitizedPath('01path.md#content1')).toStrictEqual('01path#content1');
+  expect(pipeline.getSanitizedPath('01.md#content1')).toStrictEqual('01#content1');
+
+  expect(pipeline.getSanitizedPath('01/01.md#content1')).toStrictEqual('01/01#content1');
+  expect(pipeline.getSanitizedPath('Header/Path.md#content1')).toStrictEqual('header/path#content1');
+  expect(pipeline.getSanitizedPath('Header/01-Path.md#content1')).toStrictEqual('header/path#content1');
+  expect(pipeline.getSanitizedPath('01-Header/01-Path.md#content1')).toStrictEqual('header/path#content1');
+  expect(pipeline.getSanitizedPath('01-Header/[01]Path.md#content1')).toStrictEqual('header/path#content1');
+  expect(pipeline.getSanitizedPath('(01)Header/[01]Path.md#content1')).toStrictEqual('header/path#content1');
+  expect(pipeline.getSanitizedPath(':01:Header/[01-Path.md#content1')).toStrictEqual('header/01-path#content1');
+
+  // Without file extensions
+  expect(pipeline.getSanitizedPath('path01#content1')).toStrictEqual('path01#content1');
+  expect(pipeline.getSanitizedPath('01-path#content1')).toStrictEqual('path#content1');
+  expect(pipeline.getSanitizedPath('Header/01-Path#content1')).toStrictEqual('header/path#content1');
+});
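The case split in `computeFileFragment` mirrors how Node's `path.parse` treats these inputs: when the file has an extension, the `#fragment` ends up in `ext` (everything after the last `.`); without one, it stays in `name`. A minimal sketch of that behaviour, assuming plain `node:path` and made-up paths, not code from this change:

```ts
import { parse } from 'node:path';

// With an extension, the fragment lands in `ext`.
console.log(parse('docs/01-Getting-Started.md#install'));
// { root: '', dir: 'docs', base: '01-Getting-Started.md#install',
//   ext: '.md#install', name: '01-Getting-Started' }

// Without an extension there is no '.', so the fragment stays in `name`.
console.log(parse('docs/01-Getting-Started#install'));
// { root: '', dir: 'docs', base: '01-Getting-Started#install',
//   ext: '', name: '01-Getting-Started#install' }
```

This is also why the new unit test covers both the `.md#content1` inputs and the extension-less `#content1` inputs.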