UBERF-6854: S3 provider (#5611)

Signed-off-by: Andrey Sobolev <haiodo@gmail.com>
This commit is contained in:
Andrey Sobolev 2024-05-17 17:08:30 +07:00 committed by GitHub
parent 654bfa7742
commit 1041755e29
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
38 changed files with 1791 additions and 317 deletions

File diff suppressed because it is too large Load Diff

1
dev/.env Normal file
View File

@ -0,0 +1 @@
STORAGE_CONFIG="minio|minio?accessKey=minioadmin&secretKey=minioadmin"

View File

@ -54,9 +54,7 @@ services:
- MONGO_URL=mongodb://mongodb:27017
- TRANSACTOR_URL=ws://transactor:3333
- ENDPOINT_URL=ws://localhost:3333
- MINIO_ENDPOINT=minio
- MINIO_ACCESS_KEY=minioadmin
- MINIO_SECRET_KEY=minioadmin
- STORAGE_CONFIG=${STORAGE_CONFIG}
- FRONT_URL=http://front:8080
- INIT_WORKSPACE=demo-tracker
- MODEL_ENABLED=*
@ -86,9 +84,7 @@ services:
- ELASTIC_URL=http://elastic:9200
- COLLABORATOR_URL=ws://localhost:3078
- COLLABORATOR_API_URL=http://localhost:3078
- MINIO_ENDPOINT=minio
- MINIO_ACCESS_KEY=minioadmin
- MINIO_SECRET_KEY=minioadmin
- STORAGE_CONFIG=${STORAGE_CONFIG}
- TITLE=DevPlatform
- DEFAULT_LANGUAGE=ru
- LAST_NAME_FIRST=true
@ -108,9 +104,7 @@ services:
- TRANSACTOR_URL=ws://transactor:3333
- UPLOAD_URL=/files
- MONGO_URL=mongodb://mongodb:27017
- MINIO_ENDPOINT=minio
- MINIO_ACCESS_KEY=minioadmin
- MINIO_SECRET_KEY=minioadmin
- STORAGE_CONFIG=${STORAGE_CONFIG}
restart: unless-stopped
# tracker-front:
# image: hardcoreeng/tracker-front
@ -129,9 +123,7 @@ services:
# - UPLOAD_URL=/files
# - TRANSACTOR_URL=ws://localhost:3333
# - ELASTIC_URL=http://elastic:9200
# - MINIO_ENDPOINT=minio
# - MINIO_ACCESS_KEY=minioadmin
# - MINIO_SECRET_KEY=minioadmin
# - STORAGE_CONFIG=${STORAGE_CONFIG}
transactor:
image: hardcoreeng/transactor
links:
@ -151,9 +143,7 @@ services:
- MONGO_URL=mongodb://mongodb:27017
- METRICS_CONSOLE=false
- METRICS_FILE=metrics.txt
- MINIO_ENDPOINT=minio
- MINIO_ACCESS_KEY=minioadmin
- MINIO_SECRET_KEY=minioadmin
- STORAGE_CONFIG=${STORAGE_CONFIG}
- REKONI_URL=http://rekoni:4004
- FRONT_URL=http://localhost:8087
- UPLOAD_URL=http://localhost:8087/files

View File

@ -1,36 +0,0 @@
#!/bin/bash
# Copyright © 2020, 2021 Anticrm Platform Contributors.
# Copyright © 2021 Hardcore Engineering Inc.
#
# Licensed under the Eclipse Public License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may
# obtain a copy of the License at https://www.eclipse.org/legal/epl-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
export MONGO_URL=$(kubectl get configmaps anticrm-config -o jsonpath="{.data.mongoDbUrl}")
export ELASTIC_URL=$(kubectl get configmaps anticrm-config -o jsonpath="{.data.elasticUrl}")
export MINIO_ENDPOINT=$(kubectl get configmaps anticrm-config -o jsonpath="{.data.minioEndpointUrl}")
export MINIO_ACCESS_KEY=$(kubectl get secret anticrm-secret -o jsonpath="{.data.minioAccessKey}" | base64 --decode)
export MINIO_SECRET_KEY=$(kubectl get secret anticrm-secret -o jsonpath="{.data.minioSecretKey}" | base64 --decode)
export SERVER_SECRET=$(kubectl get secret anticrm-secret -o jsonpath="{.data.serverSecret}" | base64 --decode)
kubectl run anticrm-tool --rm --tty -i --restart='Never' \
--env="MONGO_URL=$MONGO_URL" \
--env="TRANSACTOR_URL=ws://transactor/" \
--env="ELASTIC_URL=$ELASTIC_URL" \
--env="TELEGRAM_DATABASE=telegram-service" \
--env="MINIO_ENDPOINT=$MINIO_ENDPOINT" \
--env="MINIO_ACCESS_KEY=$MINIO_ACCESS_KEY" \
--env="MINIO_SECRET_KEY=$MINIO_SECRET_KEY" \
--env="SERVER_SECRET=$SERVER_SECRET" \
--image hardcoreeng/tool:v0.6.40 --command -- bash

View File

@ -73,9 +73,13 @@ export async function cleanWorkspace (
attachments.map((it) => it.file).concat(contacts.map((it) => it.avatar).filter((it) => it) as string[])
)
const minioList = await storageAdapter.list(ctx, workspaceId)
const minioList = await storageAdapter.listStream(ctx, workspaceId)
const toClean: string[] = []
for (const mv of minioList) {
while (true) {
const mv = await minioList.next()
if (mv === undefined) {
break
}
if (!files.has(mv._id)) {
toClean.push(mv._id)
}
@ -158,10 +162,13 @@ export async function fixMinioBW (
): Promise<void> {
console.log('try clean bw miniature for ', workspaceId.name)
const from = new Date(new Date().setDate(new Date().getDate() - 7)).getTime()
const list = await storageService.list(ctx, workspaceId)
console.log('found', list.length)
const list = await storageService.listStream(ctx, workspaceId)
let removed = 0
for (const obj of list) {
while (true) {
const obj = await list.next()
if (obj === undefined) {
break
}
if (obj.modifiedOn < from) continue
if ((obj._id as string).includes('%size%')) {
await storageService.remove(ctx, workspaceId, [obj._id])

View File

@ -37,7 +37,6 @@ export interface StorageAdapter {
delete: (ctx: MeasureContext, workspaceId: WorkspaceId) => Promise<void>
remove: (ctx: MeasureContext, workspaceId: WorkspaceId, objectNames: string[]) => Promise<void>
list: (ctx: MeasureContext, workspaceId: WorkspaceId, prefix?: string) => Promise<ListBlobResult[]>
listStream: (ctx: MeasureContext, workspaceId: WorkspaceId, prefix?: string) => Promise<BlobStorageIterator>
stat: (ctx: MeasureContext, workspaceId: WorkspaceId, objectName: string) => Promise<Blob | undefined>
get: (ctx: MeasureContext, workspaceId: WorkspaceId, objectName: string) => Promise<Readable>
@ -135,3 +134,29 @@ export class DummyStorageAdapter implements StorageAdapter, StorageAdapterEx {
export function createDummyStorageAdapter (): StorageAdapter {
return new DummyStorageAdapter()
}
/**
 * Remove every object of a workspace (optionally restricted to a key prefix).
 *
 * Streams the object list via `listStream` and deletes in bounded batches so
 * that arbitrarily large workspaces do not require holding the full object
 * list in memory.
 *
 * @param ctx - measurement/logging context passed through to the adapter
 * @param storage - adapter whose objects should be removed
 * @param workspaceId - workspace whose objects are targeted
 * @param prefix - optional storage-key prefix to limit the deletion
 */
export async function removeAllObjects (
  ctx: MeasureContext,
  storage: StorageAdapter,
  workspaceId: WorkspaceId,
  prefix?: string
): Promise<void> {
  // We need to list all files and delete them
  const iterator = await storage.listStream(ctx, workspaceId, prefix)
  let bulk: string[] = []
  while (true) {
    const obj = await iterator.next()
    if (obj === undefined) {
      break
    }
    bulk.push(obj.storageId)
    if (bulk.length > 50) {
      // Flush a batch to keep the in-flight delete request bounded.
      await storage.remove(ctx, workspaceId, bulk)
      bulk = []
    }
  }
  // Flush the final partial batch.
  if (bulk.length > 0) {
    await storage.remove(ctx, workspaceId, bulk)
  }
  // Release any resources held by the listing iterator.
  await iterator.close()
}

View File

@ -56,6 +56,7 @@
"@hcengineering/server-collaboration": "^0.6.0",
"@hcengineering/server-collaboration-resources": "^0.6.0",
"@hcengineering/server": "^0.6.4",
"@hcengineering/server-storage": "^0.6.0",
"@hcengineering/mongo": "^0.6.1",
"@hcengineering/elastic": "^0.6.0",
"elastic-apm-node": "~3.26.0",

View File

@ -18,7 +18,8 @@
import contactPlugin from '@hcengineering/contact'
import notification from '@hcengineering/notification'
import { setMetadata } from '@hcengineering/platform'
import { serverConfigFromEnv, storageConfigFromEnv } from '@hcengineering/server'
import { serverConfigFromEnv } from '@hcengineering/server'
import { storageConfigFromEnv } from '@hcengineering/server-storage'
import serverCore, { type StorageConfiguration } from '@hcengineering/server-core'
import serverNotification from '@hcengineering/server-notification'
import serverToken from '@hcengineering/server-token'

View File

@ -1417,6 +1417,11 @@
"projectFolder": "server/minio",
"shouldPublish": false
},
{
"packageName": "@hcengineering/s3",
"projectFolder": "server/s3",
"shouldPublish": false
},
{
"packageName": "@hcengineering/bitrix",
"projectFolder": "plugins/bitrix",

View File

@ -45,7 +45,7 @@ import core, {
} from '@hcengineering/core'
import notification, { Collaborators } from '@hcengineering/notification'
import { getMetadata } from '@hcengineering/platform'
import serverCore, { TriggerControl } from '@hcengineering/server-core'
import serverCore, { TriggerControl, removeAllObjects } from '@hcengineering/server-core'
import { workbenchId } from '@hcengineering/workbench'
export async function OnSpaceTypeMembers (tx: Tx, control: TriggerControl): Promise<Tx[]> {
@ -149,14 +149,7 @@ export async function OnContactDelete (
await storageAdapter.remove(ctx, workspace, [avatar])
if (avatar != null) {
const extra = await storageAdapter.list(ctx, workspace, avatar)
if (extra.length > 0) {
await storageAdapter.remove(
ctx,
workspace,
Array.from(extra.entries()).map((it) => it[1]._id)
)
}
await removeAllObjects(ctx, storageAdapter, workspace, avatar)
}
const result: Tx[] = []

View File

@ -52,7 +52,7 @@ import toolPlugin, { connect, initModel, upgradeModel } from '@hcengineering/ser
import { pbkdf2Sync, randomBytes } from 'crypto'
import { Binary, Db, Filter, ObjectId, type MongoClient } from 'mongodb'
import fetch from 'node-fetch'
import type { StorageAdapter } from '../../core/types'
import { type StorageAdapter } from '../../core/types'
import { accountPlugin } from './plugin'
const WORKSPACE_COLLECTION = 'workspace'
@ -1775,12 +1775,6 @@ export async function dropWorkspaceFull (
const wspace = getWorkspaceId(workspaceId, productId)
const hasBucket = await storageAdapter?.exists(ctx, wspace)
if (storageAdapter !== undefined && hasBucket === true) {
const docs = await storageAdapter.list(ctx, wspace)
await storageAdapter.remove(
ctx,
wspace,
docs.map((it) => it._id)
)
await storageAdapter.delete(ctx, wspace)
}
ctx.info('Workspace fully dropped', { workspace: ws.workspace })

View File

@ -34,9 +34,9 @@ export function startBackup (ctx: MeasureContext): void {
}
const storageAdapter: StorageAdapter = new MinioService({
endPoint: minioEndpoint,
endpoint: minioEndpoint,
port: minioPort,
useSSL: false,
useSSL: 'false',
accessKey: config.MinioAccessKey,
secretKey: config.MinioSecretKey
})

View File

@ -28,10 +28,6 @@ export interface Config {
TransactorUrl: string
MongoUrl: string
UploadUrl: string
MinioEndpoint: string
MinioAccessKey: string
MinioSecretKey: string
}
const envMap: { [key in keyof Config]: string } = {
@ -42,23 +38,10 @@ const envMap: { [key in keyof Config]: string } = {
AccountsUrl: 'ACCOUNTS_URL',
TransactorUrl: 'TRANSACTOR_URL',
MongoUrl: 'MONGO_URL',
UploadUrl: 'UPLOAD_URL',
MinioEndpoint: 'MINIO_ENDPOINT',
MinioAccessKey: 'MINIO_ACCESS_KEY',
MinioSecretKey: 'MINIO_SECRET_KEY'
UploadUrl: 'UPLOAD_URL'
}
const required: Array<keyof Config> = [
'Secret',
'ServiceID',
'Port',
'AccountsUrl',
'TransactorUrl',
'MongoUrl',
'MinioEndpoint',
'MinioAccessKey',
'MinioSecretKey'
]
const required: Array<keyof Config> = ['Secret', 'ServiceID', 'Port', 'AccountsUrl', 'TransactorUrl', 'MongoUrl']
const config: Config = (() => {
const params: Partial<Config> = {
@ -69,10 +52,7 @@ const config: Config = (() => {
AccountsUrl: process.env[envMap.AccountsUrl],
TransactorUrl: process.env[envMap.TransactorUrl],
MongoUrl: process.env[envMap.MongoUrl],
UploadUrl: process.env[envMap.UploadUrl] ?? '/files',
MinioEndpoint: process.env[envMap.MinioEndpoint],
MinioAccessKey: process.env[envMap.MinioAccessKey],
MinioSecretKey: process.env[envMap.MinioSecretKey]
UploadUrl: process.env[envMap.UploadUrl] ?? '/files'
}
const missingEnv = required.filter((key) => params[key] === undefined).map((key) => envMap[key])

View File

@ -93,13 +93,6 @@ export class AggregatorStorageAdapter implements StorageAdapter, StorageAdapterE
await this.dbAdapter.clean(ctx, workspaceId, DOMAIN_BLOB, objectNames as Ref<Blob>[])
}
async list (ctx: MeasureContext, workspaceId: WorkspaceId, prefix?: string | undefined): Promise<ListBlobResult[]> {
return await this.dbAdapter.find<Blob>(ctx, workspaceId, DOMAIN_BLOB, {
_class: core.class.Blob,
_id: { $regex: `${prefix ?? ''}.*` }
})
}
async listStream (
ctx: MeasureContext,
workspaceId: WorkspaceId,

View File

@ -197,7 +197,6 @@ export function createNullStorageFactory (): StorageAdapter {
make: async (ctx, workspaceId: WorkspaceId) => {},
remove: async (ctx, workspaceId: WorkspaceId, objectNames: string[]) => {},
delete: async (ctx, workspaceId: WorkspaceId) => {},
list: async (ctx, workspaceId: WorkspaceId, prefix?: string) => [],
listStream: async (ctx, workspaceId, prefix) => {
return {
next: async () => undefined,

View File

@ -425,6 +425,8 @@ export interface ServiceAdapterConfig {
export interface StorageConfig {
name: string
kind: string
endpoint: string
port?: number
}
export interface StorageConfiguration {

View File

@ -51,6 +51,7 @@
"cors": "^2.8.5",
"@hcengineering/elastic": "^0.6.0",
"@hcengineering/server-core": "^0.6.1",
"@hcengineering/storage": "^0.6.0",
"@hcengineering/server-storage": "^0.6.0",
"@hcengineering/server-token": "^0.6.7",
"@hcengineering/attachment": "^0.6.9",

View File

@ -16,7 +16,7 @@
import { Analytics } from '@hcengineering/analytics'
import { MeasureContext, WorkspaceId, metricsAggregate } from '@hcengineering/core'
import { StorageAdapter } from '@hcengineering/server-core'
import { StorageAdapter, removeAllObjects } from '@hcengineering/storage'
import { Token, decodeToken } from '@hcengineering/server-token'
import bp from 'body-parser'
import cors from 'cors'
@ -533,14 +533,7 @@ export function start (
// TODO: Add support for related documents.
// TODO: Move support of image resize/format change to separate place.
const extra = await config.storageAdapter.list(ctx, payload.workspace, uuid)
if (extra.length > 0) {
await config.storageAdapter.remove(
ctx,
payload.workspace,
Array.from(extra.entries()).map((it) => it[1]._id)
)
}
await removeAllObjects(ctx, config.storageAdapter, payload.workspace, uuid)
res.status(200).send()
} catch (error: any) {

View File

@ -31,10 +31,10 @@
"@types/node": "~20.11.16",
"jest": "^29.7.0",
"ts-jest": "^29.1.1",
"@types/jest": "^29.5.5"
"@types/jest": "^29.5.5",
"@types/minio": "~7.0.11"
},
"dependencies": {
"@types/minio": "~7.0.11",
"@hcengineering/core": "^0.6.28",
"@hcengineering/server-core": "^0.6.1",
"minio": "^7.0.26"

View File

@ -24,6 +24,7 @@ import core, {
} from '@hcengineering/core'
import {
removeAllObjects,
type BlobStorageIterator,
type ListBlobResult,
type StorageAdapter,
@ -41,13 +42,10 @@ export function getBucketId (workspaceId: WorkspaceId): string {
export interface MinioConfig extends StorageConfig {
kind: 'minio'
region: string
endpoint: string
accessKeyId: string
secretAccessKey: string
port: number
useSSL: boolean
accessKey: string
secretKey: string
useSSL?: string
region?: string
}
/**
@ -56,17 +54,15 @@ export interface MinioConfig extends StorageConfig {
export class MinioService implements StorageAdapter {
static config = 'minio'
client: Client
constructor (
readonly opt: {
endPoint: string
port: number
accessKey: string
secretKey: string
useSSL: boolean
region?: string
}
) {
this.client = new Client(opt)
constructor (readonly opt: Omit<MinioConfig, 'name' | 'kind'>) {
this.client = new Client({
endPoint: opt.endpoint,
accessKey: opt.accessKey,
secretKey: opt.secretKey,
region: opt.region ?? 'us-east-1',
port: opt.port ?? 9000,
useSSL: opt.useSSL === 'true'
})
}
async initialize (ctx: MeasureContext, workspaceId: WorkspaceId): Promise<void> {}
@ -78,7 +74,7 @@ export class MinioService implements StorageAdapter {
}
async make (ctx: MeasureContext, workspaceId: WorkspaceId): Promise<void> {
await this.client.makeBucket(getBucketId(workspaceId), this.opt.region)
await this.client.makeBucket(getBucketId(workspaceId), this.opt.region ?? 'us-east-1')
}
async remove (ctx: MeasureContext, workspaceId: WorkspaceId, objectNames: string[]): Promise<void> {
@ -86,48 +82,10 @@ export class MinioService implements StorageAdapter {
}
async delete (ctx: MeasureContext, workspaceId: WorkspaceId): Promise<void> {
await removeAllObjects(ctx, this, workspaceId)
await this.client.removeBucket(getBucketId(workspaceId))
}
async list (ctx: MeasureContext, workspaceId: WorkspaceId, prefix?: string): Promise<ListBlobResult[]> {
try {
const items = new Map<string, ListBlobResult>()
const list = this.client.listObjects(getBucketId(workspaceId), prefix, true)
await new Promise((resolve, reject) => {
list.on('data', (data) => {
if (data.name !== undefined) {
items.set(data.name, {
_id: data.name as Ref<Blob>,
_class: core.class.Blob,
etag: data.etag,
size: data.size,
provider: 'minio',
space: core.space.Configuration,
modifiedBy: core.account.ConfigUser,
modifiedOn: data.lastModified.getTime(),
storageId: data.name
})
}
})
list.on('end', () => {
list.destroy()
resolve(null)
})
list.on('error', (err) => {
list.destroy()
reject(err)
})
})
return Array.from(items.values())
} catch (err: any) {
const msg = (err?.message as string) ?? ''
if (msg.includes('Invalid bucket name') || msg.includes('The specified bucket does not exist')) {
return []
}
throw err
}
}
async listStream (
ctx: MeasureContext,
workspaceId: WorkspaceId,

7
server/s3/.eslintrc.js Normal file
View File

@ -0,0 +1,7 @@
// ESLint configuration for the @hcengineering/s3 package.
// Reuses the shared platform-rig "node" profile and points the
// type-aware rules at this package's own tsconfig.
module.exports = {
  extends: ['./node_modules/@hcengineering/platform-rig/profiles/node/eslint.config.json'],
  parserOptions: {
    tsconfigRootDir: __dirname,
    project: './tsconfig.json'
  }
}

4
server/s3/.npmignore Normal file
View File

@ -0,0 +1,4 @@
*
!/lib/**
!CHANGELOG.md
/lib/**/__tests__/

View File

@ -0,0 +1,5 @@
{
"$schema": "https://developer.microsoft.com/json-schemas/rig-package/rig.schema.json",
"rigPackageName": "@hcengineering/platform-rig",
"rigProfile": "node"
}

7
server/s3/jest.config.js Normal file
View File

@ -0,0 +1,7 @@
// Jest configuration for the @hcengineering/s3 package.
module.exports = {
  preset: 'ts-jest',
  testEnvironment: 'node',
  // Match *.spec / *.test files with js/ts/jsx/tsx extensions.
  testMatch: ['**/?(*.)+(spec|test).[jt]s?(x)'],
  roots: ["./src"],
  coverageReporters: ["text-summary", "html"]
}

42
server/s3/package.json Normal file
View File

@ -0,0 +1,42 @@
{
"name": "@hcengineering/s3",
"version": "0.6.0",
"main": "lib/index.js",
"svelte": "src/index.ts",
"types": "types/index.d.ts",
"author": "Anticrm Platform Contributors",
"template": "@hcengineering/node-package",
"license": "EPL-2.0",
"scripts": {
"build": "compile",
"build:watch": "compile",
"test": "jest --passWithNoTests --silent --forceExit",
"format": "format src",
"_phase:build": "compile transpile src",
"_phase:test": "jest --passWithNoTests --silent --forceExit",
"_phase:format": "format src",
"_phase:validate": "compile validate"
},
"devDependencies": {
"@hcengineering/platform-rig": "^0.6.0",
"@typescript-eslint/eslint-plugin": "^6.11.0",
"eslint-plugin-import": "^2.26.0",
"eslint-plugin-promise": "^6.1.1",
"eslint-plugin-n": "^15.4.0",
"eslint": "^8.54.0",
"@typescript-eslint/parser": "^6.11.0",
"eslint-config-standard-with-typescript": "^40.0.0",
"prettier": "^3.1.0",
"typescript": "^5.3.3",
"@types/node": "~20.11.16",
"jest": "^29.7.0",
"ts-jest": "^29.1.1",
"@types/jest": "^29.5.5"
},
"dependencies": {
"@hcengineering/core": "^0.6.28",
"@hcengineering/server-core": "^0.6.1",
"@hcengineering/storage": "^0.6.0",
"@aws-sdk/client-s3": "^3.575.0"
}
}

305
server/s3/src/index.ts Normal file
View File

@ -0,0 +1,305 @@
//
// Copyright © 2022 Hardcore Engineering Inc.
//
// Licensed under the Eclipse Public License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. You may
// obtain a copy of the License at https://www.eclipse.org/legal/epl-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
import { S3 } from '@aws-sdk/client-s3'
import core, {
toWorkspaceString,
withContext,
type Blob,
type MeasureContext,
type Ref,
type WorkspaceId
} from '@hcengineering/core'
import {
type BlobStorageIterator,
type ListBlobResult,
type StorageAdapter,
type StorageConfig,
type UploadedObjectInfo
} from '@hcengineering/server-core'
import { Readable } from 'stream'
import { removeAllObjects } from '@hcengineering/storage'
import type { ReadableStream } from 'stream/web'
/**
 * Configuration of the S3 storage adapter.
 * `endpoint` and optional `port` are inherited from StorageConfig.
 */
export interface S3Config extends StorageConfig {
  kind: 's3'
  // S3 access key id
  accessKey: string
  // S3 secret access key
  secretKey: string
  // S3 region; the adapter falls back to 'auto' when not specified
  region?: string

  // If defined, all resources will be inside selected root bucket.
  rootBucket?: string

  // A prefix string to be added to a bucketId in case rootBucket not used
  bucketPrefix?: string
}
/**
 * @public
 *
 * Storage adapter for S3-compatible services, built on @aws-sdk/client-s3.
 *
 * Bucket layout depends on configuration:
 * - when `opt.rootBucket` is set, all workspaces share that bucket and each
 *   workspace's objects live under a `<workspace>/` key prefix;
 * - otherwise each workspace gets a dedicated bucket named
 *   `<bucketPrefix><workspace>`.
 */
export class S3Service implements StorageAdapter {
  static config = 's3'
  client: S3
  constructor (readonly opt: S3Config) {
    this.client = new S3({
      endpoint: opt.endpoint,
      credentials: {
        accessKeyId: opt.accessKey,
        secretAccessKey: opt.secretKey
      },
      region: opt.region ?? 'auto'
    })
  }

  /**
   * @public
   * Bucket that holds the workspace's objects (the shared root bucket when configured).
   */
  getBucketId (workspaceId: WorkspaceId): string {
    return this.opt.rootBucket ?? (this.opt.bucketPrefix ?? '') + toWorkspaceString(workspaceId, '.')
  }

  // Key prefix used for a workspace when all data lives inside the root bucket.
  getBucketFolder (workspaceId: WorkspaceId): string {
    return toWorkspaceString(workspaceId, '.')
  }

  async initialize (ctx: MeasureContext, workspaceId: WorkspaceId): Promise<void> {}

  async close (): Promise<void> {}

  async exists (ctx: MeasureContext, workspaceId: WorkspaceId): Promise<boolean> {
    try {
      const result = await this.client.headBucket({
        Bucket: this.getBucketId(workspaceId)
      })
      return result.$metadata.httpStatusCode === 200
    } catch (err: any) {
      if (err.name === '400') {
        // No bucket exists
        return false
      }
      // Ignore other errors; existence is reported as false below.
      console.error(err)
    }
    // No direct API to check whether a bucket exists, so any headBucket
    // failure is treated as "does not exist".
    return false
  }

  @withContext('make')
  async make (ctx: MeasureContext, workspaceId: WorkspaceId): Promise<void> {
    try {
      await this.client.createBucket({
        Bucket: this.getBucketId(workspaceId)
      })
    } catch (err: any) {
      // Creating an already-existing bucket fails; log and continue.
      console.error(err)
    }
  }

  // Full object key for a workspace-scoped object name.
  getDocumentKey (workspace: WorkspaceId, name: string): string {
    return this.opt.rootBucket === undefined ? name : `${this.getBucketFolder(workspace)}/${name}`
  }

  @withContext('remove')
  async remove (ctx: MeasureContext, workspaceId: WorkspaceId, objectNames: string[]): Promise<void> {
    await this.client.deleteObjects({
      Bucket: this.getBucketId(workspaceId),
      Delete: {
        Objects: objectNames.map((it) => ({ Key: this.getDocumentKey(workspaceId, it) }))
      }
    })
  }

  @withContext('delete')
  async delete (ctx: MeasureContext, workspaceId: WorkspaceId): Promise<void> {
    if (this.opt.rootBucket === undefined) {
      // Dedicated bucket per workspace: remove the bucket itself.
      await this.client.deleteBucket({
        Bucket: this.getBucketId(workspaceId)
      })
    } else {
      // Shared root bucket: remove only this workspace's key prefix.
      await removeAllObjects(ctx, this, workspaceId, this.getBucketFolder(workspaceId) + '/')
    }
  }

  // Strip the workspace folder prefix from a raw S3 key, if present.
  stripPrefix (prefix: string | undefined, key: string): string {
    if (prefix !== undefined && key.startsWith(prefix)) {
      return key.slice(prefix.length)
    }
    return key
  }

  @withContext('listStream')
  async listStream (
    ctx: MeasureContext,
    workspaceId: WorkspaceId,
    prefix?: string | undefined
  ): Promise<BlobStorageIterator> {
    let hasMore = true
    const buffer: ListBlobResult[] = []
    let token: string | undefined
    const rootPrefix = this.opt.rootBucket !== undefined ? this.getBucketFolder(workspaceId) + '/' : undefined
    return {
      next: async (): Promise<ListBlobResult | undefined> => {
        try {
          // Fetch the next page only once the local buffer is drained, and
          // stop paging when S3 reports the listing is no longer truncated.
          if (hasMore && buffer.length === 0) {
            const res = await this.client.listObjectsV2({
              Bucket: this.getBucketId(workspaceId),
              Prefix: rootPrefix !== undefined ? rootPrefix + (prefix ?? '') : prefix ?? '',
              ContinuationToken: token
            })
            if (res.IsTruncated === true) {
              token = res.NextContinuationToken
            } else {
              hasMore = false
            }
            for (const data of res.Contents ?? []) {
              const _id = this.stripPrefix(rootPrefix, data.Key ?? '')
              buffer.push({
                _id: _id as Ref<Blob>,
                _class: core.class.Blob,
                etag: data.ETag ?? '',
                size: data.Size ?? 0,
                provider: 's3',
                space: core.space.Configuration,
                modifiedBy: core.account.ConfigUser,
                modifiedOn: data.LastModified?.getTime() ?? 0,
                storageId: _id
              })
            }
          }
        } catch (err: any) {
          console.error(err)
        }
        if (buffer.length > 0) {
          return buffer.shift()
        }
        // Buffer drained and no more pages: iteration is finished.
        return undefined
      },
      close: async () => {}
    }
  }

  async stat (ctx: MeasureContext, workspaceId: WorkspaceId, objectName: string): Promise<Blob | undefined> {
    try {
      const result = await this.client.headObject({
        Bucket: this.getBucketId(workspaceId),
        Key: this.getDocumentKey(workspaceId, objectName)
      })
      return {
        provider: '',
        _class: core.class.Blob,
        _id: objectName as Ref<Blob>,
        storageId: objectName,
        contentType: result.ContentType ?? '',
        size: result.ContentLength ?? 0,
        etag: result.ETag ?? '',
        space: core.space.Configuration,
        modifiedBy: core.account.System,
        modifiedOn: result.LastModified?.getTime() ?? 0,
        version: result.VersionId ?? null
      }
    } catch (err: any) {
      ctx.error('no object found', { error: err, objectName, workspaceId: workspaceId.name })
      return undefined
    }
  }

  @withContext('get')
  async get (ctx: MeasureContext, workspaceId: WorkspaceId, objectName: string): Promise<Readable> {
    return await this.doGet(ctx, workspaceId, objectName)
  }

  // Shared GET implementation; `range` is an optional HTTP Range header value.
  async doGet (ctx: MeasureContext, workspaceId: WorkspaceId, objectName: string, range?: string): Promise<Readable> {
    const res = await this.client.getObject({
      Bucket: this.getBucketId(workspaceId),
      Key: this.getDocumentKey(workspaceId, objectName),
      Range: range
    })

    const stream = res.Body?.transformToWebStream()

    if (stream !== undefined) {
      return Readable.fromWeb(stream as ReadableStream<any>)
    } else {
      // No body: hand back an empty, already-ended readable.
      const readable = new Readable()
      readable._read = () => {}
      readable.push(null)
      return readable
    }
  }

  @withContext('put')
  async put (
    ctx: MeasureContext,
    workspaceId: WorkspaceId,
    objectName: string,
    stream: Readable | Buffer | string,
    contentType: string,
    size?: number
  ): Promise<UploadedObjectInfo> {
    const result = await this.client.putObject({
      Bucket: this.getBucketId(workspaceId),
      Key: this.getDocumentKey(workspaceId, objectName),
      ContentLength: size,
      ContentType: contentType,
      Body: stream
    })
    return {
      etag: result.ETag ?? '',
      versionId: result.VersionId ?? null
    }
  }

  @withContext('read')
  async read (ctx: MeasureContext, workspaceId: WorkspaceId, name: string): Promise<Buffer[]> {
    const data = await this.doGet(ctx, workspaceId, name)
    const chunks: Buffer[] = []

    await new Promise((resolve, reject) => {
      data.on('readable', () => {
        let chunk
        while ((chunk = data.read()) !== null) {
          const b = chunk as Buffer
          chunks.push(b)
        }
      })

      data.on('end', () => {
        data.destroy()
        resolve(null)
      })
      data.on('error', (err) => {
        reject(err)
      })
    })
    return chunks
  }

  async partial (
    ctx: MeasureContext,
    workspaceId: WorkspaceId,
    objectName: string,
    offset: number,
    length?: number
  ): Promise<Readable> {
    // HTTP byte ranges are inclusive on both ends, so `length` bytes starting
    // at `offset` end at `offset + length - 1`.
    const range = length !== undefined ? `bytes=${offset}-${offset + length - 1}` : `bytes=${offset}-`
    return await this.doGet(ctx, workspaceId, objectName, range)
  }
}

10
server/s3/tsconfig.json Normal file
View File

@ -0,0 +1,10 @@
{
"extends": "./node_modules/@hcengineering/platform-rig/profiles/node/tsconfig.json",
"compilerOptions": {
"rootDir": "./src",
"outDir": "./lib",
"declarationDir": "./types",
"tsBuildInfoFile": ".build/build.tsbuildinfo"
}
}

View File

@ -42,8 +42,8 @@
"@hcengineering/server-ws": "^0.6.11",
"@hcengineering/mongo": "^0.6.1",
"@hcengineering/minio": "^0.6.0",
"@hcengineering/s3": "^0.6.0",
"elastic-apm-node": "~3.26.0",
"@hcengineering/server-token": "^0.6.7",
"got": "^11.8.3"
"@hcengineering/server-token": "^0.6.7"
}
}

View File

@ -101,9 +101,7 @@ export async function createStorageDataAdapter (
}
// We need to create bucket if it doesn't exist
if (storage !== undefined) {
if (!(await storage.exists(ctx, workspaceId))) {
await storage.make(ctx, workspaceId)
}
await storage.make(ctx, workspaceId)
}
const blobAdapter = await createMongoAdapter(ctx, hierarchy, url, workspaceId, modelDb, undefined, {
calculateHash: (d) => {

View File

@ -0,0 +1,41 @@
import { MinioConfig } from '@hcengineering/minio'
import { StorageConfiguration } from '@hcengineering/server-core'
/**
 * Populate the storage configuration from the legacy MINIO_* environment
 * variables and make that MinIO storage the default.
 *
 * Exits the process with code 1 (after logging) when a required variable is
 * missing or the endpoint's port is not numeric, matching the previous
 * fail-fast startup behavior.
 */
export function addMinioFallback (storageConfig: StorageConfiguration): void {
  let minioEndpoint = process.env.MINIO_ENDPOINT
  if (minioEndpoint === undefined) {
    console.error('MINIO_ENDPOINT is required')
    process.exit(1)
  }
  const minioAccessKey = process.env.MINIO_ACCESS_KEY
  if (minioAccessKey === undefined) {
    console.error('MINIO_ACCESS_KEY is required')
    process.exit(1)
  }
  const minioSecretKey = process.env.MINIO_SECRET_KEY
  if (minioSecretKey === undefined) {
    console.error('MINIO_SECRET_KEY is required')
    process.exit(1)
  }

  // MINIO_ENDPOINT may carry an explicit port as `host:port`; default to 9000.
  let minioPort = 9000
  const sp = minioEndpoint.split(':')
  if (sp.length > 1) {
    minioEndpoint = sp[0]
    minioPort = parseInt(sp[1], 10)
    if (Number.isNaN(minioPort)) {
      console.error('MINIO_ENDPOINT port must be a number')
      process.exit(1)
    }
  }

  const minioConfig: MinioConfig = {
    kind: 'minio',
    name: 'minio',
    port: minioPort,
    region: 'us-east-1',
    useSSL: 'false',
    endpoint: minioEndpoint,
    accessKey: minioAccessKey,
    secretKey: minioSecretKey
  }
  storageConfig.storages.push(minioConfig)
  storageConfig.default = 'minio'
}

View File

@ -1,66 +1,90 @@
import { MinioConfig, MinioService } from '@hcengineering/minio'
import { createRawMongoDBAdapter } from '@hcengineering/mongo'
import { StorageAdapter, StorageConfiguration, buildStorage } from '@hcengineering/server-core'
import { S3Service, type S3Config } from '@hcengineering/s3'
import { StorageAdapter, StorageConfiguration, buildStorage, type StorageConfig } from '@hcengineering/server-core'
import { addMinioFallback } from './minio'
/*
A ';' separated list of URI's to configure the storage adapters.
Each config is in `kind|name|uri` format.
Each config is in `kind===name|uri` format.
Last one is used as default one.
Example:
STORAGE_CONFIG=kind|minio|minio:9000?accessKey=minio&secretKey=minio&useSSL=false;\
s3|https://s3.amazonaws.com?accessKey=${ACCESS_KEY}&secretKey=${SECRET_KEY}&region=us-east-1
*/
export function storageConfigFromEnv (): StorageConfiguration {
const storageConfig: StorageConfiguration = JSON.parse(
process.env.STORAGE_CONFIG ?? '{ "default": "", "storages": []}'
)
const storageConfig: StorageConfiguration = { default: '', storages: [] }
const storageEnv = process.env.STORAGE_CONFIG
if (storageEnv !== undefined) {
parseStorageEnv(storageEnv, storageConfig)
}
if (storageConfig.storages.length === 0 || storageConfig.default === '') {
// 'STORAGE_CONFIG is required for complex configuration, fallback to minio config'
let minioEndpoint = process.env.MINIO_ENDPOINT
if (minioEndpoint === undefined) {
console.error('MINIO_ENDPOINT is required')
process.exit(1)
}
const minioAccessKey = process.env.MINIO_ACCESS_KEY
if (minioAccessKey === undefined) {
console.error('MINIO_ACCESS_KEY is required')
process.exit(1)
}
let minioPort = 9000
const sp = minioEndpoint.split(':')
if (sp.length > 1) {
minioEndpoint = sp[0]
minioPort = parseInt(sp[1])
}
const minioSecretKey = process.env.MINIO_SECRET_KEY
if (minioSecretKey === undefined) {
console.error('MINIO_SECRET_KEY is required')
process.exit(1)
}
const minioConfig: MinioConfig = {
kind: 'minio',
name: 'minio',
port: minioPort,
region: 'us-east-1',
useSSL: false,
endpoint: minioEndpoint,
accessKeyId: minioAccessKey,
secretAccessKey: minioSecretKey
}
storageConfig.storages.push(minioConfig)
storageConfig.default = 'minio'
addMinioFallback(storageConfig)
}
return storageConfig
}
/**
 * Parse a STORAGE_CONFIG value into `storageConfig`.
 *
 * The value is a ';'-separated list of entries, each in `kind|uri` or
 * `kind|name|uri` form (name defaults to kind). Query parameters of the URI
 * are copied verbatim onto the resulting config object as adapter-specific
 * fields. The last parsed entry becomes the default storage.
 *
 * @throws Error on an entry without '|', or on a duplicated storage name.
 */
export function parseStorageEnv (storageEnv: string, storageConfig: StorageConfiguration): void {
  const storages = storageEnv.split(';')
  for (const st of storages) {
    if (st.trim().length === 0 || !st.includes('|')) {
      throw new Error('Invalid storage config:' + st)
    }
    let [kind, name, url] = st.split('|')
    if (url == null) {
      // Two-part form `kind|uri`: the name defaults to the kind.
      url = name
      name = kind
    }
    let hasProtocol = true
    if (!url.includes('://')) {
      // No protocol; add a placeholder one so URL() can parse host/port/params.
      url = 'empty://' + url
      hasProtocol = false
    }
    const uri = new URL(url)
    const config: StorageConfig = {
      kind,
      name,
      endpoint: (hasProtocol ? uri.protocol + '//' : '') + uri.hostname, // Port should go away
      port: uri.port !== '' ? parseInt(uri.port, 10) : undefined
    }

    // Copy every query parameter onto the config as an adapter-specific field.
    uri.searchParams.forEach((v, k) => {
      ;(config as any)[k] = v
    })
    if (storageConfig.storages.find((it) => it.name === config.name) !== undefined) {
      throw new Error(`Duplicated storage name ${config.name}, config:${st}`)
    }
    storageConfig.storages.push(config)
    storageConfig.default = config.name
  }
}
export function buildStorageFromConfig (config: StorageConfiguration, dbUrl: string): StorageAdapter {
return buildStorage(config, createRawMongoDBAdapter(dbUrl), (kind, config): StorageAdapter => {
if (kind === MinioService.config) {
const c = config as MinioConfig
return new MinioService({
accessKey: c.accessKeyId,
secretKey: c.secretAccessKey,
endPoint: c.endpoint,
region: c.region,
port: c.port,
useSSL: c.useSSL
})
if (c.endpoint == null || c.accessKey == null || c.secretKey == null) {
throw new Error('One of endpoint/accessKey/secretKey values are not specified')
}
return new MinioService(c)
} else if (kind === S3Service.config) {
const c = config as S3Config
if (c.endpoint == null || c.accessKey == null || c.secretKey == null) {
throw new Error('One of endpoint/accessKey/secretKey values are not specified')
}
return new S3Service(c)
} else {
throw new Error('Unsupported storage kind:' + kind)
}

View File

@ -0,0 +1,60 @@
import type { MinioConfig } from '@hcengineering/minio'
import type { S3Config } from '@hcengineering/s3'
import type { StorageConfiguration } from '@hcengineering/server-core'
import { parseStorageEnv } from '../starter'
// Tests for parseStorageEnv: STORAGE_CONFIG entries have the shape
// `kind|[name|]host[:port]?key=value&...`, with multiple entries separated by ';'.
describe('config-parse', () => {
  it('single-minio', async () => {
    // Two-segment form `kind|url`: the storage name defaults to the kind ('minio').
    const cfg: StorageConfiguration = { default: '', storages: [] }
    parseStorageEnv('minio|localhost:9000?accessKey=minio&secretKey=minio2', cfg)
    expect(cfg.default).toEqual('minio')
    const minio = cfg.storages[0] as MinioConfig
    expect(minio.endpoint).toEqual('localhost')
    expect(minio.port).toEqual(9000)
    expect(minio.accessKey).toEqual('minio')
    expect(minio.secretKey).toEqual('minio2')
  })
  it('single-minio-named', async () => {
    // Three-segment form `kind|name|url`: explicit name ('myminio') overrides the kind.
    const cfg: StorageConfiguration = { default: '', storages: [] }
    parseStorageEnv('minio|myminio|localhost:9000?accessKey=minio&secretKey=minio2', cfg)
    expect(cfg.default).toEqual('myminio')
    const minio = cfg.storages[0] as MinioConfig
    expect(minio.endpoint).toEqual('localhost')
    expect(minio.port).toEqual(9000)
    expect(minio.accessKey).toEqual('minio')
    expect(minio.secretKey).toEqual('minio2')
  })
  it('single-s3-line', async () => {
    // A URL with an explicit protocol keeps it in the endpoint and derives no port.
    const cfg: StorageConfiguration = { default: '', storages: [] }
    parseStorageEnv('s3|https://s3.somehost.com?accessKey=minio&secretKey=minio2', cfg)
    expect(cfg.default).toEqual('s3')
    const minio = cfg.storages[0] as S3Config
    expect(minio.endpoint).toEqual('https://s3.somehost.com')
    expect(minio.port).toEqual(undefined)
    expect(minio.accessKey).toEqual('minio')
    expect(minio.secretKey).toEqual('minio2')
  })
  it('multiple', async () => {
    // ';'-separated entries: all are registered and the last one becomes the default.
    const cfg: StorageConfiguration = { default: '', storages: [] }
    parseStorageEnv(
      'minio|localhost:9000?accessKey=minio&secretKey=minio2;s3|http://localhost?accessKey=minio&secretKey=minio2',
      cfg
    )
    expect(cfg.default).toEqual('s3')
    expect(cfg.storages.length).toEqual(2)
  })
  it('test-decode unexpected symbols', async () => {
    // Percent-encoded query values (spaces, newlines, emoji, embedded URLs)
    // must arrive URL-decoded on the resulting config object.
    const cfg: StorageConfiguration = { default: '', storages: [] }
    parseStorageEnv(
      'minio|localhost:9000?accessKey=%F0%9F%91%85%F0%9F%91%BB%20-%20%D0%AD%D0%A2%D0%9E%20%20%20%20%D1%82%D0%B0%D0%BA%D0%BE%D0%B9%20%D0%BF%D0%B0%D1%80%D0%BE%D0%BB%D1%8C%0A%D0%90%20%D1%82%D0%BE&secretKey=minio2&downloadUrl=https%3A%2F%2Ffront.hc.engineering',
      cfg
    )
    expect(cfg.default).toEqual('minio')
    const minio = cfg.storages[0] as MinioConfig
    expect(minio.endpoint).toEqual('localhost')
    expect(minio.port).toEqual(9000)
    expect(minio.accessKey).toEqual('👅👻 - ЭТО такой пароль\nА то')
    expect(minio.secretKey).toEqual('minio2')
    expect((minio as any).downloadUrl).toEqual('https://front.hc.engineering')
  })
})

View File

@ -1,53 +1,3 @@
import { MinioConfig } from '@hcengineering/minio'
import { StorageConfiguration } from '@hcengineering/server-core'
/**
 * Build a storage configuration from the environment.
 *
 * Reads STORAGE_CONFIG (a JSON StorageConfiguration). When it is absent or
 * does not describe a usable default storage, falls back to the legacy
 * MINIO_ENDPOINT / MINIO_ACCESS_KEY / MINIO_SECRET_KEY variables and exits
 * the process with code 1 if any of them is missing.
 *
 * @returns the parsed (or fallback minio) storage configuration
 */
export function storageConfigFromEnv (): StorageConfiguration {
  const rawConfig = process.env.STORAGE_CONFIG ?? '{ "default": "", "storages": []}'
  const storageConfig: StorageConfiguration = JSON.parse(rawConfig)
  const needFallback = storageConfig.storages.length === 0 || storageConfig.default === ''
  if (needFallback) {
    // 'STORAGE_CONFIG is required for complex configuration, fallback to minio config'
    const endpointEnv = process.env.MINIO_ENDPOINT
    if (endpointEnv === undefined) {
      console.error('MINIO_ENDPOINT is required')
      process.exit(1)
    }
    const accessKeyId = process.env.MINIO_ACCESS_KEY
    if (accessKeyId === undefined) {
      console.error('MINIO_ACCESS_KEY is required')
      process.exit(1)
    }
    const secretAccessKey = process.env.MINIO_SECRET_KEY
    if (secretAccessKey === undefined) {
      console.error('MINIO_SECRET_KEY is required')
      process.exit(1)
    }
    // The endpoint may carry an explicit port as host[:port]; default to 9000.
    const [host, portText] = endpointEnv.split(':')
    const fallback: MinioConfig = {
      kind: 'minio',
      name: 'minio',
      port: portText !== undefined ? parseInt(portText) : 9000,
      region: 'us-east-1',
      useSSL: false,
      endpoint: host,
      accessKeyId,
      secretAccessKey
    }
    storageConfig.storages.push(fallback)
    storageConfig.default = 'minio'
  }
  return storageConfig
}
export interface ServerEnv {
url: string
elasticUrl: string

View File

@ -44,6 +44,7 @@
"@hcengineering/server-token": "^0.6.7",
"@hcengineering/server-core": "^0.6.1",
"@hcengineering/server": "^0.6.4",
"@hcengineering/server-storage": "^0.6.0",
"@hcengineering/mongo": "^0.6.1",
"@hcengineering/minio": "^0.6.0",
"fast-equals": "^5.0.1"

View File

@ -91,7 +91,7 @@ export class BlobClient {
async pipeFromStorage (name: string, size: number): Promise<Buffer> {
let written = 0
const chunkSize = 256 * 1024
const chunkSize = 1024 * 1024
const chunks: Buffer[] = []
  // Use ranges to iterate through file with retry if required.
@ -107,6 +107,9 @@ export class BlobClient {
const chunk = Buffer.from(await response.arrayBuffer())
chunks.push(chunk)
written += chunk.length
if (size > 1024 * 1024) {
console.log('Downloaded', Math.round(written / (1024 * 1024)), 'Mb of', Math.round(size / (1024 * 1024)))
}
break
} catch (err: any) {
if (i === 4) {

View File

@ -31,8 +31,8 @@ import core, {
} from '@hcengineering/core'
import { consoleModelLogger, MigrateOperation, ModelLogger } from '@hcengineering/model'
import { createMongoTxAdapter, DBCollectionHelper, getMongoClient, getWorkspaceDB } from '@hcengineering/mongo'
import { buildStorageFromConfig, storageConfigFromEnv } from '@hcengineering/server'
import { DomainIndexHelperImpl, StorageAdapter, StorageConfiguration } from '@hcengineering/server-core'
import { buildStorageFromConfig, storageConfigFromEnv } from '@hcengineering/server-storage'
import { Db, Document } from 'mongodb'
import { connect } from './connect'
import toolPlugin from './plugin'
@ -73,24 +73,6 @@ export function prepareTools (rawTxes: Tx[]): {
mongodbUri: string
txes: Tx[]
} {
const minioEndpoint = process.env.MINIO_ENDPOINT
if (minioEndpoint === undefined) {
console.error('please provide minio endpoint')
process.exit(1)
}
const minioAccessKey = process.env.MINIO_ACCESS_KEY
if (minioAccessKey === undefined) {
console.error('please provide minio access key')
process.exit(1)
}
const minioSecretKey = process.env.MINIO_SECRET_KEY
if (minioSecretKey === undefined) {
console.error('please provide minio secret key')
process.exit(1)
}
const mongodbUri = process.env.MONGO_URL
if (mongodbUri === undefined) {
console.error('please provide mongodb url.')

1
tests/.env Normal file
View File

@ -0,0 +1 @@
STORAGE_CONFIG="minio|minio?accessKey=minioadmin&secretKey=minioadmin"

View File

@ -45,9 +45,7 @@ services:
- MONGO_URL=mongodb://mongodb:27018
- TRANSACTOR_URL=ws://transactor:3334
- ENDPOINT_URL=ws://localhost:3334
- MINIO_ENDPOINT=minio
- MINIO_ACCESS_KEY=minioadmin
- MINIO_SECRET_KEY=minioadmin
- STORAGE_CONFIG=${STORAGE_CONFIG}
- MODEL_ENABLED=*
front:
image: hardcoreeng/front
@ -75,9 +73,7 @@ services:
- TELEGRAM_URL=http://localhost:8086
- COLLABORATOR_URL=ws://localhost:3079
- COLLABORATOR_API_URL=http://localhost:3079
- MINIO_ENDPOINT=minio
- MINIO_ACCESS_KEY=minioadmin
- MINIO_SECRET_KEY=minioadmin
- STORAGE_CONFIG=${STORAGE_CONFIG}
- LAST_NAME_FIRST=true
transactor:
image: hardcoreeng/transactor
@ -98,9 +94,7 @@ services:
- MONGO_URL=mongodb://mongodb:27018
- METRICS_CONSOLE=false
- METRICS_FILE=metrics.txt
- MINIO_ENDPOINT=minio
- MINIO_ACCESS_KEY=minioadmin
- MINIO_SECRET_KEY=minioadmin
- STORAGE_CONFIG=${STORAGE_CONFIG}
- REKONI_URL=http://rekoni:4005
- FRONT_URL=http://localhost:8083
- UPLOAD_URL=http://localhost:8083/files
@ -122,9 +116,7 @@ services:
- TRANSACTOR_URL=ws://transactor:3334
- UPLOAD_URL=/files
- MONGO_URL=mongodb://mongodb:27018
- MINIO_ENDPOINT=minio
- MINIO_ACCESS_KEY=minioadmin
- MINIO_SECRET_KEY=minioadmin
- STORAGE_CONFIG=${STORAGE_CONFIG}
restart: unless-stopped
rekoni:
image: hardcoreeng/rekoni-service