From 5b914a148c109b1ca08273ca3ceaa03009a1b938 Mon Sep 17 00:00:00 2001
From: Tuan Cao
Date: Fri, 15 Apr 2022 11:20:32 +0700
Subject: [PATCH] Remove lint

Fix build error
---
 components/MDContent.js |   2 +-
 lib/_post.js            | 113 ----------------------------------------
 2 files changed, 1 insertion(+), 114 deletions(-)

diff --git a/components/MDContent.js b/components/MDContent.js
index a358fc4..36b57c1 100644
--- a/components/MDContent.js
+++ b/components/MDContent.js
@@ -13,7 +13,7 @@ function BackLinks({linkList}) {

             Link to this note
 
             {linkList.map(aLink =>
-
+
 
                 {aLink.title}
 
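
The one-line change in this hunk lands inside the linkList.map(...) callback, where the usual ESLint complaint is a missing key prop on the element returned from the map. What follows is a hypothetical sketch of that pattern only, not the literal patch content; the field names slug and title and the /note/ route are assumptions:

    // Hypothetical sketch of the react/jsx-key fix; aLink.slug, aLink.title
    // and the /note/ route are assumed names, not taken from this patch.
    function BackLinks({ linkList }) {
      return (
        <div>
          <h3>Link to this note</h3>
          {linkList.map(aLink => (
            // each element produced by .map() needs a stable, unique key
            <div key={aLink.slug}>
              <a href={`/note/${aLink.slug}`}>{aLink.title}</a>
            </div>
          ))}
        </div>
      )
    }
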
diff --git a/lib/_post.js b/lib/_post.js
index 973c304..5655e2c 100644
--- a/lib/_post.js
+++ b/lib/_post.js
@@ -15,116 +15,3 @@ const postsDirectory = path.join(process.cwd(), 'posts')
 const isFile = fileName => {
   return fs.lstatSync(fileName).isFile()
 }
-
-
-export function getSortedPostsData() {
-    // Get file names under /posts
-    const filePaths = Node.getFiles(postsDirectory).filter(fn => fn.endsWith(".md"))
-    const fileNames = filePaths.map(f => f.split("/")[f.split("/").length - 1].replace(".md", ""))
-    //console.log("filePaths", filePaths)
-
-    var allPostsData = filePaths.map(fileName => {
-        //console.log("filename", fileNames)
-        // Remove ".md" from file name to get id
-        const slug = fileName.replace(/\.md$/, '').split("/")[fileName.split("/").length - 1]
-        //console.log("slug", slug)
-
-        // Read markdown file as string
-        const fileContent = fs.readFileSync(fileName, 'utf8')
-
-        // Use gray-matter to parse the post metadata section
-        const matterResult = Remark.getFrontMatterData(fileContent)// matter(fileContent).data
-        const permalink = matterResult.permalink
-        const content = fileContent.split("---\n")[fileContent.split("---").length -1 ]
-        //console.log("content", content)
-        //console.log("frontmatter \n\n", fileContents)
-        // let processor = unified()
-        // .use(markdown, { gfm: true })
-        // .use(wikiLinkPlugin)
-        // const htmlContent = Remark.getHtmlContent(fileContent, {
-        //     fileNames:fileNames,
-        //     permalink: `/note/${permalink}`
-        // })
-
-        //unified()
-        //.use(markdown)
-        //.use(wikiLinkPlugin, {
-        //    permalinks:fileNames,
-        //    pageResolver: function(pageName){return [pageName.replace(/ /g, "-").toLowerCase()]},
-        //    hrefTemplate: function(permalink){return `/note/${permalink}`}
-        //}).use(html)
-        //.process(content,
-        //    function (err, file) {
-        //        //console.log("asd", String(file).slice(0,50))
-        //        //console.error("remark: ", report(err || file))
-        //        htmlContent.push(String(file).replace("\n", ""))
-        //    }
-        //)
-
-
-        //console.log("tree",tree)
-
-
-        console.log("htmlContent", htmlContent,)
-        // Combine the data with the slug
-        return {
-            id:slug,
-            ...matterResult,
-            data:htmlContent
-        }
-    })
-
-    return allPostsData
-    }
-
-export async function getSortedPostsData() {
-    // Get file names under /posts
-    const fileNames = Node.getFiles(postsDirectory).filter(fn => fn.endsWith(".md"))
-    console.log("filenames", fileNames)
-
-    var allPostsData = fileNames.map(fileName => {
-        //console.log("filename", fileName)
-        // Remove ".md" from file name to get id
-        const slug = fileName.replace(/\.md$/, '').split("/")[fileName.split("/").length - 1]
-        //console.log("slug", slug)
-
-        // Read markdown file as string
-        const fileContents = fs.readFileSync(fileName, 'utf8')
-
-        // Use gray-matter to parse the post metadata section
-        const matterResult = matter(fileContents).data
-        const content = fileContents.split("---\n")[fileContents.split("---").length -1 ]
-        //console.log("content", content)
-        //console.log("frontmatter \n\n", fileContents)
-// let processor = unified()
-// .use(markdown, { gfm: true })
-// .use(wikiLinkPlugin)
-        const htmlContent = []
-        remark().use(html).
-        process(content,
-            function (err, file) {
-                //console.log("asd", String(file).slice(0,50))
-                //console.error("remark: ", report(err || file))
-                htmlContent.push(String(file).replace("\n", ""))
-            })
-        //var processor = unified()
-        //    .use(markdown, { gfm: true })
-        //    .use(wikiLinkPlugin)
-
-        //console.log("processor", processor);
-        //const res = process.stdin.pipe(stream(processor)).pipe(process.stdout)
-        var tree = unified().use(markdown).parse(content)
-        console.log(tree)
-
-
-        console.log("htmlContent", htmlContent.length,)
-        // Combine the data with the slug
-        return {
-            id:slug,
-            ...matterResult,
-            data:htmlContent.join("")
-        }
-    })
-
-    return allPostsData
-}
\ No newline at end of file
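
The build error is most likely the duplicate declaration visible above: the deleted block defined getSortedPostsData twice in the same module (a synchronous copy and an async copy), and the first copy also read an htmlContent variable that only exists inside commented-out code. The patch drops both stale copies.

For orientation only, here is a minimal sketch of what a single working getSortedPostsData along those lines could look like, assuming the same dependencies the deleted code pulled in (fs, path, gray-matter, remark, remark-html). It is not part of this patch and not necessarily what the rest of lib/_post.js does:

    // Minimal sketch only, assuming gray-matter + remark + remark-html as in the
    // deleted code. Not part of this patch.
    import fs from 'fs'
    import path from 'path'
    import matter from 'gray-matter'
    import { remark } from 'remark'   // remark <= 12 exposes a default export instead
    import html from 'remark-html'

    const postsDirectory = path.join(process.cwd(), 'posts')

    export async function getSortedPostsData() {
      // The deleted code walked postsDirectory with Node.getFiles; a flat
      // readdirSync keeps this sketch self-contained
      const fileNames = fs.readdirSync(postsDirectory).filter(fn => fn.endsWith('.md'))

      const allPostsData = await Promise.all(fileNames.map(async fileName => {
        // Remove ".md" from the file name to get the id/slug
        const slug = fileName.replace(/\.md$/, '')
        const fileContents = fs.readFileSync(path.join(postsDirectory, fileName), 'utf8')

        // gray-matter splits the front matter (data) from the markdown body (content)
        const { data: frontMatter, content } = matter(fileContents)

        // remark + remark-html render the body to an HTML string;
        // process() returns a promise when no callback is given
        const processed = await remark().use(html).process(content)

        // Combine the front matter with the slug and the rendered HTML
        return { id: slug, ...frontMatter, data: String(processed) }
      }))

      return allPostsData
    }

Keeping it async matches how a Next.js data helper is normally consumed, e.g. const posts = await getSortedPostsData() inside getStaticProps.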