Here's the whole file management API. I am using busboy, which is what multer uses under the hood. I found it easier to use.
const express = require("express");
const router = express.Router();
const config = require("../../config");
const busboy = require("busboy");
const fs = require("fs");
const SHA256 = require("crypto-js/sha256");
let filesFolderPath = config.paths.files;
router.get("/api/records/:recordUid/files/:fieldUid", async (req, res, next) => {
try {
let { recordUid, fieldUid } = req.params;
let query = `
select
rdf.*,
r.uid recordUid,
round(sizeBytes / 1024, 0) sizeKb,
round(sizeBytes / 1024 / 1024, 0) sizeMb
from recordDataFile rdf
left join record r on r.id = rdf.recordId
left join field f on f.id = rdf.fieldId
where
r.uid = ?
and f.uid = ?;
`;
let rows = await req.pool.query(query, [recordUid, fieldUid]);
res.status(200).send(rows);
} catch (err) {
next(err);
}
});
router.get("/api/files/:hash", async (req, res, next) => {
try {
let { hash } = req.params;
let query = `
select *
from recordDataFile
where hash = ?
`;
let rows = await req.pool.query(query, [hash]);
let fileData = rows[0];
// No metadata for that hash
if (!fileData) return res.status(404).send({ error: "File not found" });
res.download(fileData.path);
} catch (err) {
next(err);
}
});
router.post("/api/files", async (req, res, next) => {
try {
let bb = busboy({
headers: req.headers,
defCharset: "utf8",
limits: {
fileSize: 20 * 1024 * 1024, // 20 mb
files: 5,
},
});
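// Note: if a file hits limits.fileSize, busboy truncates that file's stream and emits a "limit" event on it; that case isn't handled below.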
let fields = {};
// Get any text values (busboy v1 passes (name, value, info))
bb.on("field", (fieldname, val, info) => {
console.log(fieldname, val);
fields[fieldname] = val;
});
// Read file stream (busboy v1 passes (name, stream, info), where info holds the filename, encoding and mimeType)
bb.on("file", (fieldname, fileStream, info) => {
// Prevents hieroglyphs from cyrillic file names (busboy hands the raw header value over as latin1)
let originalName = Buffer.from(info.filename, "latin1").toString("utf8");
let nameParts = originalName.split(".");
let extension = nameParts[nameParts.length - 1]; // without the . from .jpeg
// IMPORTANT: the on-disk file name can't have spaces (it won't save properly), so the file is stored under a hash instead
let hash = SHA256(`${+new Date()}${originalName}`).toString();
// Absolute path to the stored file (filesFolderPath is expected to end with a path separator)
let filePath = `${filesFolderPath}${hash}`;
// Open writeable stream to path
let writeStream = fs.createWriteStream(filePath);
// Pipe the file to the opened stream
fileStream.pipe(writeStream);
// Check for errors
writeStream.on("error", (err) => {
console.log("writeStream", err);
});
// Writing done, stream closed ('close' receives no error argument; write errors are handled above)
writeStream.on("close", async () => {
let query = `
insert into recordDataFile
(
recordId,
fieldId,
name,
extension,
hash,
path,
sizeBytes,
userId,
created
)
values
(
(select id from record where uid = ?),
(select id from field where uid = ?),
?,
?,
?,
?,
?,
?,
now()
);
`;
let sizeBytes = fs.statSync(filePath).size;
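// Note: fields.recordUid / fields.fieldUid are only populated if the client appends the text fields before the file, since busboy emits parts in the order they arrive.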
await req.pool.query(query, [fields.recordUid, fields.fieldUid, originalName, extension, hash, filePath, sizeBytes, req.userId]);
// record updated. send notification?
await req.pool.query(`update record set updated = now(), updatedByUserId = ? where uid = ?`, [req.userId, fields.recordUid]);
});
});
bb.on("finish", () => {
res.status(200).send({ success: true });
});
req.pipe(bb); // Hooks the streams together. Without it, you're not feeding busboy any data to parse.
} catch (err) {
console.log("file upload catch", err);
next(err);
}
});
router.delete("/api/files/:hash", async (req, res, next) => {
try {
let { hash } = req.params;
// get the file
let query = `
select * from recordDataFile where hash = ?
`;
let rows = await req.pool.query(query, [hash]);
let file = rows[0];
// Nothing to delete for that hash
if (!file) return res.status(404).send({ error: "File not found" });
// remove the file from disk (skip if it's already gone)
if (fs.existsSync(file.path)) fs.unlinkSync(file.path);
// delete the file metadata
await req.pool.query(`delete from recordDataFile where hash = ?`, [hash]);
res.status(200).send(rows);
} catch (err) {
next(err);
}
});
module.exports = router;
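For completeness, here's a minimal sketch of how a browser client could call the upload route. The endpoint and the recordUid/fieldUid field names come from the code above; everything else (the uploadFile function name, using fetch with FormData, same-origin URL) is just one illustrative way to call it.

// Minimal client-side upload sketch (browser fetch + FormData).
async function uploadFile(recordUid, fieldUid, file) {
  let formData = new FormData();
  // Append the text fields before the file so busboy emits them first
  formData.append("recordUid", recordUid);
  formData.append("fieldUid", fieldUid);
  formData.append("file", file); // a File from an <input type="file">
  let response = await fetch("/api/files", {
    method: "POST",
    body: formData, // the browser sets the multipart boundary itself
  });
  return response.json();
}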