// starry/backend/omr-service/src/config.ts
// Initial deployment: frontend + omr-service + cluster-server + nginx proxy
// (commit 6f1c297, k-l-lambda)
import dotenv from 'dotenv';
import { existsSync } from 'fs';
import { dirname, join } from 'path';
import { fileURLToPath } from 'url';
// ESM modules have no built-in __dirname; reconstruct it from import.meta.url.
const __dirname = dirname(fileURLToPath(import.meta.url));

// Load environment files: `.env.local` (one level above src/) takes precedence
// when present, then `.env` fills in any variables still unset.
const localEnvFile = join(__dirname, '..', '.env.local');
if (existsSync(localEnvFile)) {
  dotenv.config({ path: localEnvFile });
}
dotenv.config();
/**
 * Parse an integer environment variable.
 *
 * Unlike a bare `parseInt(process.env.X || 'fallback')`, this returns the
 * fallback when the variable is unset OR set to a non-numeric value, so a
 * malformed env var (e.g. `PORT=abc`) cannot leak `NaN` into the config.
 *
 * @param value    raw env var value (may be undefined)
 * @param fallback default used when the value is missing or not an integer
 */
function envInt(value: string | undefined, fallback: number): number {
  const parsed = parseInt(value ?? '', 10); // explicit radix 10
  return Number.isNaN(parsed) ? fallback : parsed;
}

/** Runtime configuration for the OMR service, sourced from the environment. */
export const config = {
  // HTTP server bind address.
  server: {
    port: envInt(process.env.PORT, 3080),
    host: process.env.HOST || '0.0.0.0',
  },
  // PostgreSQL connection settings.
  database: {
    host: process.env.DB_HOST || 'localhost',
    port: envInt(process.env.DB_PORT, 5432),
    database: process.env.DB_NAME || 'starry_omr',
    user: process.env.DB_USER || 'postgres',
    password: process.env.DB_PASSWORD || 'postgres',
  },
  // ZeroMQ-style TCP endpoints of the model predictor workers.
  predictors: {
    layout: process.env.PREDICTOR_LAYOUT || 'tcp://localhost:12022',
    gauge: process.env.PREDICTOR_GAUGE || 'tcp://localhost:12023',
    mask: process.env.PREDICTOR_MASK || 'tcp://localhost:12024',
    semantic: process.env.PREDICTOR_SEMANTIC || 'tcp://localhost:12025',
    loc: process.env.PREDICTOR_LOC || 'tcp://localhost:12026',
    ocr: process.env.PREDICTOR_OCR || 'tcp://localhost:12027',
    brackets: process.env.PREDICTOR_BRACKETS || 'tcp://localhost:12028',
    jianpu: process.env.PREDICTOR_JIANPU || 'tcp://localhost:12030',
    gaugeRenderer: process.env.PREDICTOR_GAUGE_RENDERER || 'tcp://localhost:12123',
  },
  // Background task polling loop.
  taskWorker: {
    pollInterval: envInt(process.env.TASK_POLL_INTERVAL, 1000), // ms between polls
    maxConcurrent: envInt(process.env.TASK_MAX_CONCURRENT, 2),
  },
  // Score-regulation model settings; enabled unless explicitly set to 'false'.
  regulation: {
    modelPath: process.env.BDTOPO_MODEL_PATH || '',
    enabled: process.env.REGULATION_ENABLED !== 'false',
  },
};

/** Union of predictor names, derived from the config keys ('layout' | 'gauge' | …). */
export type PredictorType = keyof typeof config.predictors;