mirror of
https://github.com/swissmakers/swiss-datashare.git
synced 2026-04-17 12:43:13 +02:00
feat: add support for S3 as a storage provider (#659)
* add s3 * instance the s3 client dynamically * refactor code * fix format * add docs * add docs * fix issue with s3 upload if you use the base path, fix issue with archiving -> disable archiving for s3 * split file service in local and s3 file service and fix s3 upload chunking * add working download/view * add new features to local service (from main branch) * revert s3 service and add working delete/remove functionality * refactor s3 service * Update backend/src/file/s3.service.ts Co-authored-by: Elias Schneider <login@eliasschneider.com> * Update frontend/src/components/admin/configuration/ConfigurationNavBar.tsx Co-authored-by: Elias Schneider <login@eliasschneider.com> * Update docs/docs/setup/s3.md Co-authored-by: Elias Schneider <login@eliasschneider.com> * Update backend/prisma/seed/config.seed.ts Co-authored-by: Elias Schneider <login@eliasschneider.com> * add note for ZIP archive in docs * create logger instance * make s3 instance dynamic * add icon import * remove console.logs * add correct pdf viewing format * add storage provider to share * refactor: run formatter * chore: add prisma migration * fix: don't expose `storageProvider` * chore: improve config variables description --------- Co-authored-by: Elias Schneider <login@eliasschneider.com>
This commit is contained in:
1559
backend/package-lock.json
generated
1559
backend/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -13,6 +13,7 @@
|
|||||||
"seed": "ts-node prisma/seed/config.seed.ts"
|
"seed": "ts-node prisma/seed/config.seed.ts"
|
||||||
},
|
},
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
|
"@aws-sdk/client-s3": "^3.679.0",
|
||||||
"@nestjs/cache-manager": "^2.2.2",
|
"@nestjs/cache-manager": "^2.2.2",
|
||||||
"@nestjs/common": "^10.4.3",
|
"@nestjs/common": "^10.4.3",
|
||||||
"@nestjs/config": "^3.2.3",
|
"@nestjs/config": "^3.2.3",
|
||||||
|
|||||||
@@ -0,0 +1,24 @@
|
|||||||
|
-- RedefineTables
|
||||||
|
PRAGMA defer_foreign_keys=ON;
|
||||||
|
PRAGMA foreign_keys=OFF;
|
||||||
|
CREATE TABLE "new_Share" (
|
||||||
|
"id" TEXT NOT NULL PRIMARY KEY,
|
||||||
|
"createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||||
|
"name" TEXT,
|
||||||
|
"uploadLocked" BOOLEAN NOT NULL DEFAULT false,
|
||||||
|
"isZipReady" BOOLEAN NOT NULL DEFAULT false,
|
||||||
|
"views" INTEGER NOT NULL DEFAULT 0,
|
||||||
|
"expiration" DATETIME NOT NULL,
|
||||||
|
"description" TEXT,
|
||||||
|
"removedReason" TEXT,
|
||||||
|
"creatorId" TEXT,
|
||||||
|
"reverseShareId" TEXT,
|
||||||
|
"storageProvider" TEXT NOT NULL DEFAULT 'LOCAL',
|
||||||
|
CONSTRAINT "Share_creatorId_fkey" FOREIGN KEY ("creatorId") REFERENCES "User" ("id") ON DELETE CASCADE ON UPDATE CASCADE,
|
||||||
|
CONSTRAINT "Share_reverseShareId_fkey" FOREIGN KEY ("reverseShareId") REFERENCES "ReverseShare" ("id") ON DELETE CASCADE ON UPDATE CASCADE
|
||||||
|
);
|
||||||
|
INSERT INTO "new_Share" ("createdAt", "creatorId", "description", "expiration", "id", "isZipReady", "name", "removedReason", "reverseShareId", "uploadLocked", "views") SELECT "createdAt", "creatorId", "description", "expiration", "id", "isZipReady", "name", "removedReason", "reverseShareId", "uploadLocked", "views" FROM "Share";
|
||||||
|
DROP TABLE "Share";
|
||||||
|
ALTER TABLE "new_Share" RENAME TO "Share";
|
||||||
|
PRAGMA foreign_keys=ON;
|
||||||
|
PRAGMA defer_foreign_keys=OFF;
|
||||||
@@ -95,6 +95,7 @@ model Share {
|
|||||||
security ShareSecurity?
|
security ShareSecurity?
|
||||||
recipients ShareRecipient[]
|
recipients ShareRecipient[]
|
||||||
files File[]
|
files File[]
|
||||||
|
storageProvider String @default("LOCAL")
|
||||||
}
|
}
|
||||||
|
|
||||||
model ReverseShare {
|
model ReverseShare {
|
||||||
|
|||||||
@@ -318,6 +318,38 @@ const configVariables: ConfigVariables = {
|
|||||||
obscured: true,
|
obscured: true,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
s3: {
|
||||||
|
enabled: {
|
||||||
|
type: "boolean",
|
||||||
|
defaultValue: "false",
|
||||||
|
},
|
||||||
|
endpoint: {
|
||||||
|
type: "string",
|
||||||
|
defaultValue: "",
|
||||||
|
},
|
||||||
|
region: {
|
||||||
|
type: "string",
|
||||||
|
defaultValue: "",
|
||||||
|
},
|
||||||
|
bucketName: {
|
||||||
|
type: "string",
|
||||||
|
defaultValue: "",
|
||||||
|
},
|
||||||
|
bucketPath: {
|
||||||
|
type: "string",
|
||||||
|
defaultValue: "",
|
||||||
|
},
|
||||||
|
key: {
|
||||||
|
type: "string",
|
||||||
|
defaultValue: "",
|
||||||
|
secret: true,
|
||||||
|
},
|
||||||
|
secret: {
|
||||||
|
type: "string",
|
||||||
|
defaultValue: "",
|
||||||
|
obscured: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
type ConfigVariables = {
|
type ConfigVariables = {
|
||||||
|
|||||||
@@ -17,6 +17,7 @@ import { CreateShareGuard } from "src/share/guard/createShare.guard";
|
|||||||
import { ShareOwnerGuard } from "src/share/guard/shareOwner.guard";
|
import { ShareOwnerGuard } from "src/share/guard/shareOwner.guard";
|
||||||
import { FileService } from "./file.service";
|
import { FileService } from "./file.service";
|
||||||
import { FileSecurityGuard } from "./guard/fileSecurity.guard";
|
import { FileSecurityGuard } from "./guard/fileSecurity.guard";
|
||||||
|
import * as mime from "mime-types";
|
||||||
|
|
||||||
@Controller("shares/:shareId/files")
|
@Controller("shares/:shareId/files")
|
||||||
export class FileController {
|
export class FileController {
|
||||||
@@ -53,7 +54,7 @@ export class FileController {
|
|||||||
@Res({ passthrough: true }) res: Response,
|
@Res({ passthrough: true }) res: Response,
|
||||||
@Param("shareId") shareId: string,
|
@Param("shareId") shareId: string,
|
||||||
) {
|
) {
|
||||||
const zip = this.fileService.getZip(shareId);
|
const zip = await this.fileService.getZip(shareId);
|
||||||
res.set({
|
res.set({
|
||||||
"Content-Type": "application/zip",
|
"Content-Type": "application/zip",
|
||||||
"Content-Disposition": contentDisposition(`${shareId}.zip`),
|
"Content-Disposition": contentDisposition(`${shareId}.zip`),
|
||||||
@@ -73,13 +74,18 @@ export class FileController {
|
|||||||
const file = await this.fileService.get(shareId, fileId);
|
const file = await this.fileService.get(shareId, fileId);
|
||||||
|
|
||||||
const headers = {
|
const headers = {
|
||||||
"Content-Type": file.metaData.mimeType,
|
"Content-Type":
|
||||||
|
mime?.lookup?.(file.metaData.name) || "application/octet-stream",
|
||||||
"Content-Length": file.metaData.size,
|
"Content-Length": file.metaData.size,
|
||||||
"Content-Security-Policy": "script-src 'none'",
|
"Content-Security-Policy": "script-src 'none'",
|
||||||
};
|
};
|
||||||
|
|
||||||
if (download === "true") {
|
if (download === "true") {
|
||||||
headers["Content-Disposition"] = contentDisposition(file.metaData.name);
|
headers["Content-Disposition"] = contentDisposition(file.metaData.name);
|
||||||
|
} else {
|
||||||
|
headers["Content-Disposition"] = contentDisposition(file.metaData.name, {
|
||||||
|
type: "inline",
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
res.set(headers);
|
res.set(headers);
|
||||||
|
|||||||
@@ -4,11 +4,13 @@ import { ReverseShareModule } from "src/reverseShare/reverseShare.module";
|
|||||||
import { ShareModule } from "src/share/share.module";
|
import { ShareModule } from "src/share/share.module";
|
||||||
import { FileController } from "./file.controller";
|
import { FileController } from "./file.controller";
|
||||||
import { FileService } from "./file.service";
|
import { FileService } from "./file.service";
|
||||||
|
import { LocalFileService } from "./local.service";
|
||||||
|
import { S3FileService } from "./s3.service";
|
||||||
|
|
||||||
@Module({
|
@Module({
|
||||||
imports: [JwtModule.register({}), ReverseShareModule, ShareModule],
|
imports: [JwtModule.register({}), ReverseShareModule, ShareModule],
|
||||||
controllers: [FileController],
|
controllers: [FileController],
|
||||||
providers: [FileService],
|
providers: [FileService, LocalFileService, S3FileService],
|
||||||
exports: [FileService],
|
exports: [FileService],
|
||||||
})
|
})
|
||||||
export class FileModule {}
|
export class FileModule {}
|
||||||
|
|||||||
@@ -1,162 +1,88 @@
|
|||||||
import {
|
import { Injectable } from "@nestjs/common";
|
||||||
BadRequestException,
|
import { LocalFileService } from "./local.service";
|
||||||
HttpException,
|
import { S3FileService } from "./s3.service";
|
||||||
HttpStatus,
|
|
||||||
Injectable,
|
|
||||||
InternalServerErrorException,
|
|
||||||
NotFoundException,
|
|
||||||
} from "@nestjs/common";
|
|
||||||
import { JwtService } from "@nestjs/jwt";
|
|
||||||
import * as crypto from "crypto";
|
|
||||||
import { createReadStream } from "fs";
|
|
||||||
import * as fs from "fs/promises";
|
|
||||||
import * as mime from "mime-types";
|
|
||||||
import { ConfigService } from "src/config/config.service";
|
import { ConfigService } from "src/config/config.service";
|
||||||
import { PrismaService } from "src/prisma/prisma.service";
|
import { Readable } from "stream";
|
||||||
import { validate as isValidUUID } from "uuid";
|
import { PrismaService } from "../prisma/prisma.service";
|
||||||
import { SHARE_DIRECTORY } from "../constants";
|
|
||||||
|
|
||||||
@Injectable()
|
@Injectable()
|
||||||
export class FileService {
|
export class FileService {
|
||||||
constructor(
|
constructor(
|
||||||
private prisma: PrismaService,
|
private prisma: PrismaService,
|
||||||
private config: ConfigService,
|
private localFileService: LocalFileService,
|
||||||
|
private s3FileService: S3FileService,
|
||||||
|
private configService: ConfigService,
|
||||||
) {}
|
) {}
|
||||||
|
|
||||||
|
// Determine which service to use based on the current config value
|
||||||
|
// shareId is optional -> can be used to overwrite a storage provider
|
||||||
|
private getStorageService(
|
||||||
|
storageProvider?: string,
|
||||||
|
): S3FileService | LocalFileService {
|
||||||
|
if (storageProvider != undefined)
|
||||||
|
return storageProvider == "S3"
|
||||||
|
? this.s3FileService
|
||||||
|
: this.localFileService;
|
||||||
|
return this.configService.get("s3.enabled")
|
||||||
|
? this.s3FileService
|
||||||
|
: this.localFileService;
|
||||||
|
}
|
||||||
|
|
||||||
async create(
|
async create(
|
||||||
data: string,
|
data: string,
|
||||||
chunk: { index: number; total: number },
|
chunk: { index: number; total: number },
|
||||||
file: { id?: string; name: string },
|
file: {
|
||||||
|
id?: string;
|
||||||
|
name: string;
|
||||||
|
},
|
||||||
shareId: string,
|
shareId: string,
|
||||||
) {
|
) {
|
||||||
if (!file.id) {
|
const storageService = this.getStorageService();
|
||||||
file.id = crypto.randomUUID();
|
return storageService.create(data, chunk, file, shareId);
|
||||||
} else if (!isValidUUID(file.id)) {
|
|
||||||
throw new BadRequestException("Invalid file ID format");
|
|
||||||
}
|
|
||||||
|
|
||||||
const share = await this.prisma.share.findUnique({
|
|
||||||
where: { id: shareId },
|
|
||||||
include: { files: true, reverseShare: true },
|
|
||||||
});
|
|
||||||
|
|
||||||
if (share.uploadLocked)
|
|
||||||
throw new BadRequestException("Share is already completed");
|
|
||||||
|
|
||||||
let diskFileSize: number;
|
|
||||||
try {
|
|
||||||
diskFileSize = (
|
|
||||||
await fs.stat(`${SHARE_DIRECTORY}/${shareId}/${file.id}.tmp-chunk`)
|
|
||||||
).size;
|
|
||||||
} catch {
|
|
||||||
diskFileSize = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the sent chunk index and the expected chunk index doesn't match throw an error
|
|
||||||
const chunkSize = this.config.get("share.chunkSize");
|
|
||||||
const expectedChunkIndex = Math.ceil(diskFileSize / chunkSize);
|
|
||||||
|
|
||||||
if (expectedChunkIndex != chunk.index)
|
|
||||||
throw new BadRequestException({
|
|
||||||
message: "Unexpected chunk received",
|
|
||||||
error: "unexpected_chunk_index",
|
|
||||||
expectedChunkIndex,
|
|
||||||
});
|
|
||||||
|
|
||||||
const buffer = Buffer.from(data, "base64");
|
|
||||||
|
|
||||||
// Check if there is enough space on the server
|
|
||||||
const space = await fs.statfs(SHARE_DIRECTORY);
|
|
||||||
const availableSpace = space.bavail * space.bsize;
|
|
||||||
if (availableSpace < buffer.byteLength) {
|
|
||||||
throw new InternalServerErrorException("Not enough space on the server");
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if share size limit is exceeded
|
|
||||||
const fileSizeSum = share.files.reduce(
|
|
||||||
(n, { size }) => n + parseInt(size),
|
|
||||||
0,
|
|
||||||
);
|
|
||||||
|
|
||||||
const shareSizeSum = fileSizeSum + diskFileSize + buffer.byteLength;
|
|
||||||
|
|
||||||
if (
|
|
||||||
shareSizeSum > this.config.get("share.maxSize") ||
|
|
||||||
(share.reverseShare?.maxShareSize &&
|
|
||||||
shareSizeSum > parseInt(share.reverseShare.maxShareSize))
|
|
||||||
) {
|
|
||||||
throw new HttpException(
|
|
||||||
"Max share size exceeded",
|
|
||||||
HttpStatus.PAYLOAD_TOO_LARGE,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
await fs.appendFile(
|
|
||||||
`${SHARE_DIRECTORY}/${shareId}/${file.id}.tmp-chunk`,
|
|
||||||
buffer,
|
|
||||||
);
|
|
||||||
|
|
||||||
const isLastChunk = chunk.index == chunk.total - 1;
|
|
||||||
if (isLastChunk) {
|
|
||||||
await fs.rename(
|
|
||||||
`${SHARE_DIRECTORY}/${shareId}/${file.id}.tmp-chunk`,
|
|
||||||
`${SHARE_DIRECTORY}/${shareId}/${file.id}`,
|
|
||||||
);
|
|
||||||
const fileSize = (
|
|
||||||
await fs.stat(`${SHARE_DIRECTORY}/${shareId}/${file.id}`)
|
|
||||||
).size;
|
|
||||||
await this.prisma.file.create({
|
|
||||||
data: {
|
|
||||||
id: file.id,
|
|
||||||
name: file.name,
|
|
||||||
size: fileSize.toString(),
|
|
||||||
share: { connect: { id: shareId } },
|
|
||||||
},
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
return file;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
async get(shareId: string, fileId: string) {
|
async get(shareId: string, fileId: string): Promise<File> {
|
||||||
const fileMetaData = await this.prisma.file.findUnique({
|
const share = await this.prisma.share.findFirst({
|
||||||
where: { id: fileId },
|
where: { id: shareId },
|
||||||
});
|
});
|
||||||
|
const storageService = this.getStorageService(share.storageProvider);
|
||||||
if (!fileMetaData) throw new NotFoundException("File not found");
|
return storageService.get(shareId, fileId);
|
||||||
|
|
||||||
const file = createReadStream(`${SHARE_DIRECTORY}/${shareId}/${fileId}`);
|
|
||||||
|
|
||||||
return {
|
|
||||||
metaData: {
|
|
||||||
mimeType: mime.contentType(fileMetaData.name.split(".").pop()),
|
|
||||||
...fileMetaData,
|
|
||||||
size: fileMetaData.size,
|
|
||||||
},
|
|
||||||
file,
|
|
||||||
};
|
|
||||||
}
|
}
|
||||||
|
|
||||||
async remove(shareId: string, fileId: string) {
|
async remove(shareId: string, fileId: string) {
|
||||||
const fileMetaData = await this.prisma.file.findUnique({
|
const storageService = this.getStorageService();
|
||||||
where: { id: fileId },
|
return storageService.remove(shareId, fileId);
|
||||||
});
|
|
||||||
|
|
||||||
if (!fileMetaData) throw new NotFoundException("File not found");
|
|
||||||
|
|
||||||
await fs.unlink(`${SHARE_DIRECTORY}/${shareId}/${fileId}`);
|
|
||||||
|
|
||||||
await this.prisma.file.delete({ where: { id: fileId } });
|
|
||||||
}
|
}
|
||||||
|
|
||||||
async deleteAllFiles(shareId: string) {
|
async deleteAllFiles(shareId: string) {
|
||||||
await fs.rm(`${SHARE_DIRECTORY}/${shareId}`, {
|
const storageService = this.getStorageService();
|
||||||
recursive: true,
|
return storageService.deleteAllFiles(shareId);
|
||||||
force: true,
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
|
||||||
getZip(shareId: string) {
|
getZip(shareId: string) {
|
||||||
return createReadStream(`${SHARE_DIRECTORY}/${shareId}/archive.zip`);
|
const storageService = this.getStorageService();
|
||||||
|
return this.streamToUint8Array(storageService.getZip(shareId) as Readable);
|
||||||
|
}
|
||||||
|
|
||||||
|
private async streamToUint8Array(stream: Readable): Promise<Uint8Array> {
|
||||||
|
const chunks: Buffer[] = [];
|
||||||
|
|
||||||
|
return new Promise((resolve, reject) => {
|
||||||
|
stream.on("data", (chunk) => chunks.push(Buffer.from(chunk)));
|
||||||
|
stream.on("end", () => resolve(new Uint8Array(Buffer.concat(chunks))));
|
||||||
|
stream.on("error", reject);
|
||||||
|
});
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export interface File {
|
||||||
|
metaData: {
|
||||||
|
id: string;
|
||||||
|
size: string;
|
||||||
|
createdAt: Date;
|
||||||
|
mimeType: string | false;
|
||||||
|
name: string;
|
||||||
|
shareId: string;
|
||||||
|
};
|
||||||
|
file: Readable;
|
||||||
|
}
|
||||||
|
|||||||
161
backend/src/file/local.service.ts
Normal file
161
backend/src/file/local.service.ts
Normal file
@@ -0,0 +1,161 @@
|
|||||||
|
import {
|
||||||
|
BadRequestException,
|
||||||
|
HttpException,
|
||||||
|
HttpStatus,
|
||||||
|
Injectable,
|
||||||
|
InternalServerErrorException,
|
||||||
|
NotFoundException,
|
||||||
|
} from "@nestjs/common";
|
||||||
|
import * as crypto from "crypto";
|
||||||
|
import { createReadStream } from "fs";
|
||||||
|
import * as fs from "fs/promises";
|
||||||
|
import * as mime from "mime-types";
|
||||||
|
import { ConfigService } from "src/config/config.service";
|
||||||
|
import { PrismaService } from "src/prisma/prisma.service";
|
||||||
|
import { validate as isValidUUID } from "uuid";
|
||||||
|
import { SHARE_DIRECTORY } from "../constants";
|
||||||
|
|
||||||
|
@Injectable()
|
||||||
|
export class LocalFileService {
|
||||||
|
constructor(
|
||||||
|
private prisma: PrismaService,
|
||||||
|
private config: ConfigService,
|
||||||
|
) {}
|
||||||
|
|
||||||
|
async create(
|
||||||
|
data: string,
|
||||||
|
chunk: { index: number; total: number },
|
||||||
|
file: { id?: string; name: string },
|
||||||
|
shareId: string,
|
||||||
|
) {
|
||||||
|
if (!file.id) {
|
||||||
|
file.id = crypto.randomUUID();
|
||||||
|
} else if (!isValidUUID(file.id)) {
|
||||||
|
throw new BadRequestException("Invalid file ID format");
|
||||||
|
}
|
||||||
|
|
||||||
|
const share = await this.prisma.share.findUnique({
|
||||||
|
where: { id: shareId },
|
||||||
|
include: { files: true, reverseShare: true },
|
||||||
|
});
|
||||||
|
|
||||||
|
if (share.uploadLocked)
|
||||||
|
throw new BadRequestException("Share is already completed");
|
||||||
|
|
||||||
|
let diskFileSize: number;
|
||||||
|
try {
|
||||||
|
diskFileSize = (
|
||||||
|
await fs.stat(`${SHARE_DIRECTORY}/${shareId}/${file.id}.tmp-chunk`)
|
||||||
|
).size;
|
||||||
|
} catch {
|
||||||
|
diskFileSize = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the sent chunk index and the expected chunk index doesn't match throw an error
|
||||||
|
const chunkSize = this.config.get("share.chunkSize");
|
||||||
|
const expectedChunkIndex = Math.ceil(diskFileSize / chunkSize);
|
||||||
|
|
||||||
|
if (expectedChunkIndex != chunk.index)
|
||||||
|
throw new BadRequestException({
|
||||||
|
message: "Unexpected chunk received",
|
||||||
|
error: "unexpected_chunk_index",
|
||||||
|
expectedChunkIndex,
|
||||||
|
});
|
||||||
|
|
||||||
|
const buffer = Buffer.from(data, "base64");
|
||||||
|
|
||||||
|
// Check if there is enough space on the server
|
||||||
|
const space = await fs.statfs(SHARE_DIRECTORY);
|
||||||
|
const availableSpace = space.bavail * space.bsize;
|
||||||
|
if (availableSpace < buffer.byteLength) {
|
||||||
|
throw new InternalServerErrorException("Not enough space on the server");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if share size limit is exceeded
|
||||||
|
const fileSizeSum = share.files.reduce(
|
||||||
|
(n, { size }) => n + parseInt(size),
|
||||||
|
0,
|
||||||
|
);
|
||||||
|
|
||||||
|
const shareSizeSum = fileSizeSum + diskFileSize + buffer.byteLength;
|
||||||
|
|
||||||
|
if (
|
||||||
|
shareSizeSum > this.config.get("share.maxSize") ||
|
||||||
|
(share.reverseShare?.maxShareSize &&
|
||||||
|
shareSizeSum > parseInt(share.reverseShare.maxShareSize))
|
||||||
|
) {
|
||||||
|
throw new HttpException(
|
||||||
|
"Max share size exceeded",
|
||||||
|
HttpStatus.PAYLOAD_TOO_LARGE,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
await fs.appendFile(
|
||||||
|
`${SHARE_DIRECTORY}/${shareId}/${file.id}.tmp-chunk`,
|
||||||
|
buffer,
|
||||||
|
);
|
||||||
|
|
||||||
|
const isLastChunk = chunk.index == chunk.total - 1;
|
||||||
|
if (isLastChunk) {
|
||||||
|
await fs.rename(
|
||||||
|
`${SHARE_DIRECTORY}/${shareId}/${file.id}.tmp-chunk`,
|
||||||
|
`${SHARE_DIRECTORY}/${shareId}/${file.id}`,
|
||||||
|
);
|
||||||
|
const fileSize = (
|
||||||
|
await fs.stat(`${SHARE_DIRECTORY}/${shareId}/${file.id}`)
|
||||||
|
).size;
|
||||||
|
await this.prisma.file.create({
|
||||||
|
data: {
|
||||||
|
id: file.id,
|
||||||
|
name: file.name,
|
||||||
|
size: fileSize.toString(),
|
||||||
|
share: { connect: { id: shareId } },
|
||||||
|
},
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
return file;
|
||||||
|
}
|
||||||
|
|
||||||
|
async get(shareId: string, fileId: string) {
|
||||||
|
const fileMetaData = await this.prisma.file.findUnique({
|
||||||
|
where: { id: fileId },
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!fileMetaData) throw new NotFoundException("File not found");
|
||||||
|
|
||||||
|
const file = createReadStream(`${SHARE_DIRECTORY}/${shareId}/${fileId}`);
|
||||||
|
|
||||||
|
return {
|
||||||
|
metaData: {
|
||||||
|
mimeType: mime.contentType(fileMetaData.name.split(".").pop()),
|
||||||
|
...fileMetaData,
|
||||||
|
size: fileMetaData.size,
|
||||||
|
},
|
||||||
|
file,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
async remove(shareId: string, fileId: string) {
|
||||||
|
const fileMetaData = await this.prisma.file.findUnique({
|
||||||
|
where: { id: fileId },
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!fileMetaData) throw new NotFoundException("File not found");
|
||||||
|
|
||||||
|
await fs.unlink(`${SHARE_DIRECTORY}/${shareId}/${fileId}`);
|
||||||
|
|
||||||
|
await this.prisma.file.delete({ where: { id: fileId } });
|
||||||
|
}
|
||||||
|
|
||||||
|
async deleteAllFiles(shareId: string) {
|
||||||
|
await fs.rm(`${SHARE_DIRECTORY}/${shareId}`, {
|
||||||
|
recursive: true,
|
||||||
|
force: true,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
getZip(shareId: string) {
|
||||||
|
return createReadStream(`${SHARE_DIRECTORY}/${shareId}/archive.zip`);
|
||||||
|
}
|
||||||
|
}
|
||||||
299
backend/src/file/s3.service.ts
Normal file
299
backend/src/file/s3.service.ts
Normal file
@@ -0,0 +1,299 @@
|
|||||||
|
import {
|
||||||
|
BadRequestException,
|
||||||
|
Injectable,
|
||||||
|
InternalServerErrorException,
|
||||||
|
NotFoundException,
|
||||||
|
Logger,
|
||||||
|
} from "@nestjs/common";
|
||||||
|
import {
|
||||||
|
AbortMultipartUploadCommand,
|
||||||
|
CompleteMultipartUploadCommand,
|
||||||
|
CreateMultipartUploadCommand,
|
||||||
|
DeleteObjectCommand,
|
||||||
|
DeleteObjectsCommand,
|
||||||
|
GetObjectCommand,
|
||||||
|
HeadObjectCommand,
|
||||||
|
ListObjectsV2Command,
|
||||||
|
S3Client,
|
||||||
|
UploadPartCommand,
|
||||||
|
UploadPartCommandOutput,
|
||||||
|
} from "@aws-sdk/client-s3";
|
||||||
|
import { PrismaService } from "src/prisma/prisma.service";
|
||||||
|
import { ConfigService } from "src/config/config.service";
|
||||||
|
import * as crypto from "crypto";
|
||||||
|
import * as mime from "mime-types";
|
||||||
|
import { File } from "./file.service";
|
||||||
|
import { Readable } from "stream";
|
||||||
|
import { validate as isValidUUID } from "uuid";
|
||||||
|
|
||||||
|
@Injectable()
|
||||||
|
export class S3FileService {
|
||||||
|
private readonly logger = new Logger(S3FileService.name);
|
||||||
|
|
||||||
|
private multipartUploads: Record<
|
||||||
|
string,
|
||||||
|
{
|
||||||
|
uploadId: string;
|
||||||
|
parts: Array<{ ETag: string | undefined; PartNumber: number }>;
|
||||||
|
}
|
||||||
|
> = {};
|
||||||
|
|
||||||
|
constructor(
|
||||||
|
private prisma: PrismaService,
|
||||||
|
private config: ConfigService,
|
||||||
|
) {}
|
||||||
|
|
||||||
|
async create(
|
||||||
|
data: string,
|
||||||
|
chunk: { index: number; total: number },
|
||||||
|
file: { id?: string; name: string },
|
||||||
|
shareId: string,
|
||||||
|
) {
|
||||||
|
if (!file.id) {
|
||||||
|
file.id = crypto.randomUUID();
|
||||||
|
} else if (!isValidUUID(file.id)) {
|
||||||
|
throw new BadRequestException("Invalid file ID format");
|
||||||
|
}
|
||||||
|
|
||||||
|
const buffer = Buffer.from(data, "base64");
|
||||||
|
const key = `${this.getS3Path()}${shareId}/${file.name}`;
|
||||||
|
const bucketName = this.config.get("s3.bucketName");
|
||||||
|
const s3Instance = this.getS3Instance();
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Initialize multipart upload if it's the first chunk
|
||||||
|
if (chunk.index === 0) {
|
||||||
|
const multipartInitResponse = await s3Instance.send(
|
||||||
|
new CreateMultipartUploadCommand({
|
||||||
|
Bucket: bucketName,
|
||||||
|
Key: key,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
const uploadId = multipartInitResponse.UploadId;
|
||||||
|
if (!uploadId) {
|
||||||
|
throw new Error("Failed to initialize multipart upload.");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store the uploadId and parts list in memory
|
||||||
|
this.multipartUploads[file.id] = {
|
||||||
|
uploadId,
|
||||||
|
parts: [],
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the ongoing multipart upload
|
||||||
|
const multipartUpload = this.multipartUploads[file.id];
|
||||||
|
if (!multipartUpload) {
|
||||||
|
throw new InternalServerErrorException(
|
||||||
|
"Multipart upload session not found.",
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
const uploadId = multipartUpload.uploadId;
|
||||||
|
|
||||||
|
// Upload the current chunk
|
||||||
|
const partNumber = chunk.index + 1; // Part numbers start from 1
|
||||||
|
|
||||||
|
const uploadPartResponse: UploadPartCommandOutput = await s3Instance.send(
|
||||||
|
new UploadPartCommand({
|
||||||
|
Bucket: bucketName,
|
||||||
|
Key: key,
|
||||||
|
PartNumber: partNumber,
|
||||||
|
UploadId: uploadId,
|
||||||
|
Body: buffer,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
// Store the ETag and PartNumber for later completion
|
||||||
|
multipartUpload.parts.push({
|
||||||
|
ETag: uploadPartResponse.ETag,
|
||||||
|
PartNumber: partNumber,
|
||||||
|
});
|
||||||
|
|
||||||
|
// Complete the multipart upload if it's the last chunk
|
||||||
|
if (chunk.index === chunk.total - 1) {
|
||||||
|
await s3Instance.send(
|
||||||
|
new CompleteMultipartUploadCommand({
|
||||||
|
Bucket: bucketName,
|
||||||
|
Key: key,
|
||||||
|
UploadId: uploadId,
|
||||||
|
MultipartUpload: {
|
||||||
|
Parts: multipartUpload.parts,
|
||||||
|
},
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
// Remove the completed upload from memory
|
||||||
|
delete this.multipartUploads[file.id];
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
// Abort the multipart upload if it fails
|
||||||
|
const multipartUpload = this.multipartUploads[file.id];
|
||||||
|
if (multipartUpload) {
|
||||||
|
try {
|
||||||
|
await s3Instance.send(
|
||||||
|
new AbortMultipartUploadCommand({
|
||||||
|
Bucket: bucketName,
|
||||||
|
Key: key,
|
||||||
|
UploadId: multipartUpload.uploadId,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
} catch (abortError) {
|
||||||
|
console.error("Error aborting multipart upload:", abortError);
|
||||||
|
}
|
||||||
|
delete this.multipartUploads[file.id];
|
||||||
|
}
|
||||||
|
this.logger.error(error);
|
||||||
|
throw new Error("Multipart upload failed. The upload has been aborted.");
|
||||||
|
}
|
||||||
|
|
||||||
|
const isLastChunk = chunk.index == chunk.total - 1;
|
||||||
|
if (isLastChunk) {
|
||||||
|
const fileSize: number = await this.getFileSize(shareId, file.name);
|
||||||
|
|
||||||
|
await this.prisma.file.create({
|
||||||
|
data: {
|
||||||
|
id: file.id,
|
||||||
|
name: file.name,
|
||||||
|
size: fileSize.toString(),
|
||||||
|
share: { connect: { id: shareId } },
|
||||||
|
},
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
return file;
|
||||||
|
}
|
||||||
|
|
||||||
|
async get(shareId: string, fileId: string): Promise<File> {
|
||||||
|
const fileName = (
|
||||||
|
await this.prisma.file.findUnique({ where: { id: fileId } })
|
||||||
|
).name;
|
||||||
|
|
||||||
|
const s3Instance = this.getS3Instance();
|
||||||
|
const key = `${this.getS3Path()}${shareId}/${fileName}`;
|
||||||
|
const response = await s3Instance.send(
|
||||||
|
new GetObjectCommand({
|
||||||
|
Bucket: this.config.get("s3.bucketName"),
|
||||||
|
Key: key,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
return {
|
||||||
|
metaData: {
|
||||||
|
id: fileId,
|
||||||
|
size: response.ContentLength?.toString() || "0",
|
||||||
|
name: fileName,
|
||||||
|
shareId: shareId,
|
||||||
|
createdAt: response.LastModified || new Date(),
|
||||||
|
mimeType:
|
||||||
|
mime.contentType(fileId.split(".").pop()) ||
|
||||||
|
"application/octet-stream",
|
||||||
|
},
|
||||||
|
file: response.Body as Readable,
|
||||||
|
} as File;
|
||||||
|
}
|
||||||
|
|
||||||
|
async remove(shareId: string, fileId: string) {
|
||||||
|
const fileMetaData = await this.prisma.file.findUnique({
|
||||||
|
where: { id: fileId },
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!fileMetaData) throw new NotFoundException("File not found");
|
||||||
|
|
||||||
|
const key = `${this.getS3Path()}${shareId}/${fileMetaData.name}`;
|
||||||
|
const s3Instance = this.getS3Instance();
|
||||||
|
|
||||||
|
try {
|
||||||
|
await s3Instance.send(
|
||||||
|
new DeleteObjectCommand({
|
||||||
|
Bucket: this.config.get("s3.bucketName"),
|
||||||
|
Key: key,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
} catch (error) {
|
||||||
|
throw new Error("Could not delete file from S3");
|
||||||
|
}
|
||||||
|
|
||||||
|
await this.prisma.file.delete({ where: { id: fileId } });
|
||||||
|
}
|
||||||
|
|
||||||
|
async deleteAllFiles(shareId: string) {
|
||||||
|
const prefix = `${this.getS3Path()}${shareId}/`;
|
||||||
|
const s3Instance = this.getS3Instance();
|
||||||
|
|
||||||
|
try {
|
||||||
|
// List all objects under the given prefix
|
||||||
|
const listResponse = await s3Instance.send(
|
||||||
|
new ListObjectsV2Command({
|
||||||
|
Bucket: this.config.get("s3.bucketName"),
|
||||||
|
Prefix: prefix,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
if (!listResponse.Contents || listResponse.Contents.length === 0) {
|
||||||
|
throw new Error(`No files found for share ${shareId}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract the keys of the files to be deleted
|
||||||
|
const objectsToDelete = listResponse.Contents.map((file) => ({
|
||||||
|
Key: file.Key!,
|
||||||
|
}));
|
||||||
|
|
||||||
|
// Delete all files in a single request (up to 1000 objects at once)
|
||||||
|
await s3Instance.send(
|
||||||
|
new DeleteObjectsCommand({
|
||||||
|
Bucket: this.config.get("s3.bucketName"),
|
||||||
|
Delete: {
|
||||||
|
Objects: objectsToDelete,
|
||||||
|
},
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
} catch (error) {
|
||||||
|
throw new Error("Could not delete all files from S3");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async getFileSize(shareId: string, fileName: string): Promise<number> {
|
||||||
|
const key = `${this.getS3Path()}${shareId}/${fileName}`;
|
||||||
|
const s3Instance = this.getS3Instance();
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Get metadata of the file using HeadObjectCommand
|
||||||
|
const headObjectResponse = await s3Instance.send(
|
||||||
|
new HeadObjectCommand({
|
||||||
|
Bucket: this.config.get("s3.bucketName"),
|
||||||
|
Key: key,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
// Return ContentLength which is the file size in bytes
|
||||||
|
return headObjectResponse.ContentLength ?? 0;
|
||||||
|
} catch (error) {
|
||||||
|
throw new Error("Could not retrieve file size");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Builds an S3 client from the current runtime configuration.
 *
 * A fresh client is created on every call so that changes made to the S3
 * settings in the admin UI take effect without restarting the backend.
 * Path-style addressing is forced for compatibility with non-AWS
 * S3-compatible providers.
 */
getS3Instance(): S3Client {
  const credentials = {
    accessKeyId: this.config.get("s3.key"),
    secretAccessKey: this.config.get("s3.secret"),
  };

  return new S3Client({
    endpoint: this.config.get("s3.endpoint"),
    region: this.config.get("s3.region"),
    credentials,
    forcePathStyle: true,
  });
}
|
||||||
|
|
||||||
|
/**
 * ZIP archives are not supported for S3-backed shares; always rejects.
 *
 * @throws BadRequestException unconditionally
 */
getZip() {
  const message = "ZIP download is not supported with S3 storage";
  throw new BadRequestException(message);
}
|
||||||
|
|
||||||
|
/**
 * Returns the configured base folder inside the bucket as a key prefix.
 *
 * @returns the configured `s3.bucketPath` with a trailing slash appended,
 *          or an empty string when no base path is configured (bucket root)
 */
getS3Path(): string {
  const basePath = this.config.get("s3.bucketPath");
  if (!basePath) {
    return "";
  }
  return `${basePath}/`;
}
|
||||||
|
}
|
||||||
@@ -24,6 +24,7 @@ import { CreateShareDTO } from "./dto/createShare.dto";
|
|||||||
export class ShareService {
|
export class ShareService {
|
||||||
constructor(
|
constructor(
|
||||||
private prisma: PrismaService,
|
private prisma: PrismaService,
|
||||||
|
private configService: ConfigService,
|
||||||
private fileService: FileService,
|
private fileService: FileService,
|
||||||
private emailService: EmailService,
|
private emailService: EmailService,
|
||||||
private config: ConfigService,
|
private config: ConfigService,
|
||||||
@@ -86,6 +87,7 @@ export class ShareService {
|
|||||||
? share.recipients.map((email) => ({ email }))
|
? share.recipients.map((email) => ({ email }))
|
||||||
: [],
|
: [],
|
||||||
},
|
},
|
||||||
|
storageProvider: this.configService.get("s3.enabled") ? "S3" : "LOCAL",
|
||||||
},
|
},
|
||||||
});
|
});
|
||||||
|
|
||||||
@@ -105,6 +107,8 @@ export class ShareService {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async createZip(shareId: string) {
|
async createZip(shareId: string) {
|
||||||
|
if (this.config.get("s3.enabled")) return;
|
||||||
|
|
||||||
const path = `${SHARE_DIRECTORY}/${shareId}`;
|
const path = `${SHARE_DIRECTORY}/${shareId}`;
|
||||||
|
|
||||||
const files = await this.prisma.file.findMany({ where: { shareId } });
|
const files = await this.prisma.file.findMany({ where: { shareId } });
|
||||||
|
|||||||
32
docs/docs/setup/s3.md
Normal file
32
docs/docs/setup/s3.md
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
---
|
||||||
|
id: s3
|
||||||
|
---
|
||||||
|
|
||||||
|
# S3
|
||||||
|
|
||||||
|
You are able to add your preferred S3 provider, like AWS, DigitalOcean, Exoscale or Infomaniak. However, if you don't
want to store your files in an S3 bucket, you don't have to. Note that this feature is `DISABLED` by default.
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
You can configure your S3 provider and bucket by going to the configuration page in your admin dashboard `/admin/config/s3`.
|
||||||
|
|
||||||
|
| Key | Description | Value |
|
||||||
|
|:-----------|:-------------------------------------------------------------------------------------------------------------------------------------|:----------------------------------------------|
|
||||||
|
| enabled | This property enables the storage location on your configured S3 bucket. | `true` |
|
||||||
|
| endpoint   | This property is the URL of your S3 endpoint.                                                                                          | `https://sos-ch-dk-2.exo.io`                   |
|
||||||
|
| region     | This property is the region where the bucket is located.                                                                               | `sos-ch-dk-2`                                  |
|
||||||
|
| bucketName | This property is the name of your S3 bucket. | `my-bucket` |
|
||||||
|
| bucketPath | This property defines the folder where you want to store your files which are uploaded. Hint: Don't put a slash in the start or end. | `my/custom/path` (or leave it empty for root) |
|
||||||
|
| key        | This is the access key you need to access your bucket.                                                                                 | `key-asdf`                                     |
|
||||||
|
| secret     | This is the secret you need to access your bucket.                                                                                     | `secret-asdf`                                  |
|
||||||
|
|
||||||
|
Don't forget to save the configuration. :)
|
||||||
|
|
||||||
|
## ClamAV
|
||||||
|
|
||||||
|
Consider that ClamAV scans are not available at the moment if you store your files in a S3 bucket.
|
||||||
|
|
||||||
|
## ZIP
|
||||||
|
|
||||||
|
Creating ZIP archives is not currently supported if you store your files in an S3 bucket.
|
||||||
@@ -36,6 +36,10 @@ const sidebars: SidebarsConfig = {
|
|||||||
type: "doc",
|
type: "doc",
|
||||||
id: "setup/oauth2login",
|
id: "setup/oauth2login",
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
type: "doc",
|
||||||
|
id: "setup/s3",
|
||||||
|
},
|
||||||
{
|
{
|
||||||
type: "doc",
|
type: "doc",
|
||||||
id: "setup/upgrading",
|
id: "setup/upgrading",
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ import {
|
|||||||
TbMail,
|
TbMail,
|
||||||
TbShare,
|
TbShare,
|
||||||
TbSocial,
|
TbSocial,
|
||||||
TbSquare,
|
TbBucket,
|
||||||
TbBinaryTree,
|
TbBinaryTree,
|
||||||
TbSettings,
|
TbSettings,
|
||||||
} from "react-icons/tb";
|
} from "react-icons/tb";
|
||||||
@@ -29,6 +29,7 @@ const categories = [
|
|||||||
{ name: "SMTP", icon: <TbAt /> },
|
{ name: "SMTP", icon: <TbAt /> },
|
||||||
{ name: "OAuth", icon: <TbSocial /> },
|
{ name: "OAuth", icon: <TbSocial /> },
|
||||||
{ name: "LDAP", icon: <TbBinaryTree /> },
|
{ name: "LDAP", icon: <TbBinaryTree /> },
|
||||||
|
{ name: "S3", icon: <TbBucket /> },
|
||||||
];
|
];
|
||||||
|
|
||||||
const useStyles = createStyles((theme) => ({
|
const useStyles = createStyles((theme) => ({
|
||||||
|
|||||||
@@ -436,6 +436,21 @@ export default {
|
|||||||
"admin.config.ldap.field-name-member-of.description": "LDAP-Attributname für die Gruppen, in denen ein Benutzer Mitglied ist. Dies wird bei der Überprüfung der Admin-Gruppe verwendet.",
|
"admin.config.ldap.field-name-member-of.description": "LDAP-Attributname für die Gruppen, in denen ein Benutzer Mitglied ist. Dies wird bei der Überprüfung der Admin-Gruppe verwendet.",
|
||||||
"admin.config.ldap.field-name-email": "Attributname für die E-Mail-Adresse des Benutzers",
|
"admin.config.ldap.field-name-email": "Attributname für die E-Mail-Adresse des Benutzers",
|
||||||
"admin.config.ldap.field-name-email.description": "LDAP-Attributname für die E-Mail-Adresse eines Benutzers.",
|
"admin.config.ldap.field-name-email.description": "LDAP-Attributname für die E-Mail-Adresse eines Benutzers.",
|
||||||
|
"admin.config.category.s3": "S3",
|
||||||
|
"admin.config.s3.enabled": "Aktiviert",
|
||||||
|
"admin.config.s3.enabled.description": "Ob S3 verwendet werden soll, um die freigegebenen Dateien anstelle des lokalen Dateisystems zu speichern.",
|
||||||
|
"admin.config.s3.endpoint": "Endpunkt",
|
||||||
|
"admin.config.s3.endpoint.description": "Die URL des S3-Buckets.",
|
||||||
|
"admin.config.s3.region": "Region",
|
||||||
|
"admin.config.s3.region.description": "Die Region des S3-Buckets.",
|
||||||
|
"admin.config.s3.bucket-name": "Bucket-Name",
|
||||||
|
"admin.config.s3.bucket-name.description": "Der Name des S3-Buckets.",
|
||||||
|
"admin.config.s3.bucket-path": "Pfad",
|
||||||
|
"admin.config.s3.bucket-path.description": "Der Standardpfad, der zum Speichern der Dateien im S3-Bucket verwendet werden soll.",
|
||||||
|
"admin.config.s3.key": "Schlüssel",
|
||||||
|
"admin.config.s3.secret": "Geheimnis",
|
||||||
|
"admin.config.s3.key.description": "Der Schlüssel, der den Zugriff auf den S3-Bucket ermöglicht.",
|
||||||
|
"admin.config.s3.secret.description": "Das Geheimnis, das den Zugriff auf den S3-Bucket ermöglicht.",
|
||||||
"admin.config.notify.success": "Configuration updated successfully.",
|
"admin.config.notify.success": "Configuration updated successfully.",
|
||||||
"admin.config.notify.logo-success": "Logo updated successfully. It may take a few minutes to update on the website.",
|
"admin.config.notify.logo-success": "Logo updated successfully. It may take a few minutes to update on the website.",
|
||||||
"admin.config.notify.no-changes": "No changes to save.",
|
"admin.config.notify.no-changes": "No changes to save.",
|
||||||
|
|||||||
@@ -626,6 +626,22 @@ export default {
|
|||||||
"Logo updated successfully. It may take a few minutes to update on the website.",
|
"Logo updated successfully. It may take a few minutes to update on the website.",
|
||||||
"admin.config.notify.no-changes": "No changes to save.",
|
"admin.config.notify.no-changes": "No changes to save.",
|
||||||
|
|
||||||
|
"admin.config.category.s3": "S3",
|
||||||
|
"admin.config.s3.enabled": "Enabled",
|
||||||
|
"admin.config.s3.enabled.description": "Whether S3 should be used to store the shared files instead of the local file system.",
|
||||||
|
"admin.config.s3.endpoint": "Endpoint",
|
||||||
|
"admin.config.s3.endpoint.description": "The URL of the S3 bucket.",
|
||||||
|
"admin.config.s3.region": "Region",
|
||||||
|
"admin.config.s3.region.description": "The region of the S3 bucket.",
|
||||||
|
"admin.config.s3.bucket-name": "Bucket name",
|
||||||
|
"admin.config.s3.bucket-name.description": "The name of the S3 bucket.",
|
||||||
|
"admin.config.s3.bucket-path": "Path",
|
||||||
|
"admin.config.s3.bucket-path.description": "The default path which should be used to store the files in the S3 bucket.",
|
||||||
|
"admin.config.s3.key": "Key",
|
||||||
|
"admin.config.s3.key.description": "The key which allows you to access the S3 bucket.",
|
||||||
|
"admin.config.s3.secret": "Secret",
|
||||||
|
"admin.config.s3.secret.description": "The secret which allows you to access the S3 bucket.",
|
||||||
|
|
||||||
// 404
|
// 404
|
||||||
"404.description": "Oops this page doesn't exist.",
|
"404.description": "Oops this page doesn't exist.",
|
||||||
"404.button.home": "Bring me back home",
|
"404.button.home": "Bring me back home",
|
||||||
|
|||||||
Reference in New Issue
Block a user