@ -0,0 +1,141 @@ |
|||
# ---> Go |
|||
# Binaries for programs and plugins |
|||
*.exe |
|||
*.exe~ |
|||
*.dll |
|||
*.so |
|||
*.dylib |
|||
|
|||
# Test binary, built with `go test -c` |
|||
*.test |
|||
|
|||
# Output of the go coverage tool, specifically when used with LiteIDE |
|||
*.out |
|||
|
|||
# Dependency directories (remove the comment below to include it) |
|||
# vendor/ |
|||
|
|||
# ---> Node |
|||
# Logs |
|||
logs |
|||
*.log |
|||
npm-debug.log* |
|||
yarn-debug.log* |
|||
yarn-error.log* |
|||
lerna-debug.log* |
|||
.pnpm-debug.log* |
|||
|
|||
# Diagnostic reports (https://nodejs.org/api/report.html) |
|||
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json |
|||
|
|||
# Runtime data |
|||
pids |
|||
*.pid |
|||
*.seed |
|||
*.pid.lock |
|||
|
|||
# Directory for instrumented libs generated by jscoverage/JSCover |
|||
lib-cov |
|||
|
|||
# Coverage directory used by tools like istanbul |
|||
coverage |
|||
*.lcov |
|||
|
|||
# nyc test coverage |
|||
.nyc_output |
|||
|
|||
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) |
|||
.grunt |
|||
|
|||
# Bower dependency directory (https://bower.io/) |
|||
bower_components |
|||
|
|||
# node-waf configuration |
|||
.lock-wscript |
|||
|
|||
# Compiled binary addons (https://nodejs.org/api/addons.html) |
|||
build/Release |
|||
|
|||
# Dependency directories |
|||
node_modules/ |
|||
jspm_packages/ |
|||
|
|||
# Snowpack dependency directory (https://snowpack.dev/) |
|||
web_modules/ |
|||
|
|||
# TypeScript cache |
|||
*.tsbuildinfo |
|||
|
|||
# Optional npm cache directory |
|||
.npm |
|||
|
|||
# Optional eslint cache |
|||
.eslintcache |
|||
|
|||
# Microbundle cache |
|||
.rpt2_cache/ |
|||
.rts2_cache_cjs/ |
|||
.rts2_cache_es/ |
|||
.rts2_cache_umd/ |
|||
|
|||
# Optional REPL history |
|||
.node_repl_history |
|||
|
|||
# Output of 'npm pack' |
|||
*.tgz |
|||
|
|||
# Yarn Integrity file |
|||
.yarn-integrity |
|||
|
|||
# dotenv environment variables file |
|||
.env |
|||
.env.test |
|||
.env.production |
|||
|
|||
# parcel-bundler cache (https://parceljs.org/) |
|||
.cache |
|||
.parcel-cache |
|||
|
|||
# Next.js build output |
|||
.next |
|||
out |
|||
|
|||
# Nuxt.js build / generate output |
|||
.nuxt |
|||
dist |
|||
|
|||
# Gatsby files |
|||
.cache/ |
|||
# Uncomment the public line below if your project uses Gatsby and not Next.js
|||
# https://nextjs.org/blog/next-9-1#public-directory-support |
|||
# public |
|||
|
|||
# vuepress build output |
|||
.vuepress/dist |
|||
|
|||
# Serverless directories |
|||
.serverless/ |
|||
|
|||
# FuseBox cache |
|||
.fusebox/ |
|||
|
|||
# DynamoDB Local files |
|||
.dynamodb/ |
|||
|
|||
# TernJS port file |
|||
.tern-port |
|||
|
|||
# Stores VSCode versions used for testing VSCode extensions |
|||
.vscode-test |
|||
|
|||
# yarn v2 |
|||
.yarn/cache |
|||
.yarn/unplugged |
|||
.yarn/build-state.yml |
|||
.yarn/install-state.gz |
|||
.pnp.* |
|||
|
|||
*yarn.lock |
|||
*package-lock.json |
|||
*log/ |
|||
*downloadFiles/ |
@ -0,0 +1,2 @@ |
|||
# FS-IOT |
|||
|
@ -0,0 +1,52 @@ |
|||
{ |
|||
// 使用 IntelliSense 了解相关属性。 |
|||
// 悬停以查看现有属性的描述。 |
|||
// 欲了解更多信息,请访问: https://go.microsoft.com/fwlink/?linkid=830387 |
|||
"version": "0.2.0", |
|||
"configurations": [ |
|||
{ |
|||
"type": "node", |
|||
"request": "launch", |
|||
"name": "启动API", |
|||
"program": "${workspaceRoot}/server.js", |
|||
"env": { |
|||
"NODE_ENV": "development" |
|||
}, |
|||
"args": [ |
|||
"-p 4000", |
|||
"-f http://localhost:4000", |
|||
"-g postgres://postgres:123@10.8.30.32:5432/video_access", |
|||
"--redisHost 10.8.30.112", |
|||
"--redisPort 6379", |
|||
"--axyApiUrl http://127.0.0.1:4100", |
|||
"--iotAuthApi http://127.0.0.1:4200", |
|||
"--godUrl https://restapi.amap.com/v3", |
|||
"--godKey 21c2d970e1646bb9a795900dd00093ce", |
|||
"--mqttVideoServer mqtt://10.8.30.71:30883", |
|||
"--iotVideoServerUrl http://221.230.55.27:8081", |
|||
// "--iotVideoServerUrl http://10.8.30.59:8080", |
|||
"--cameraPlayWsHost ws://221.230.55.27:8081", |
|||
"--cameraPlayHttpFlvHost http://221.230.55.27:2020", |
|||
"--cameraPlayHlsHost http://221.230.55.27:8081", |
|||
"--cameraPlayRtmpHost rtmp://221.230.55.27:1935", |
|||
"--cameraPlayRtspHost rtsp://221.230.55.27:554" |
|||
] |
|||
}, |
|||
{ |
|||
"type": "node", |
|||
"request": "launch", |
|||
"name": "run mocha", |
|||
"program": "${workspaceRoot}/node_modules/mocha/bin/_mocha", |
|||
"stopOnEntry": false, |
|||
"args": [ |
|||
"app/test/*.test.js", |
|||
"--no-timeouts" |
|||
], |
|||
"cwd": "${workspaceRoot}", |
|||
"runtimeExecutable": null, |
|||
"env": { |
|||
"NODE_ENV": "development" |
|||
} |
|||
} |
|||
] |
|||
} |
@ -0,0 +1,36 @@ |
|||
# FROM repository.anxinyun.cn/devops/node:12-dev as builder |
|||
|
|||
# COPY . /var/app |
|||
|
|||
# WORKDIR /var/app |
|||
|
|||
# EXPOSE 8080 |
|||
|
|||
# RUN npm config set registry=http://10.8.30.22:7000 |
|||
# RUN echo "{\"time\":\"$BUILD_TIMESTAMP\",\"build\": \"$BUILD_NUMBER\",\"revision\": \"$SVN_REVISION_1\",\"URL\":\"$SVN_URL_1\"}" > version.json |
|||
# RUN npm cache clean -f |
|||
# RUN rm -rf package-lock.json |
|||
# RUN npm install --registry http://10.8.30.22:7000 |
|||
|
|||
# FROM registry.cn-hangzhou.aliyuncs.com/fs-devops/node:12 |
|||
|
|||
# COPY --from=builder --chown=node /var/app /home/node/app |
|||
|
|||
# WORKDIR /home/node/app |
|||
|
|||
# CMD ["node", "server.js"] |
|||
|
|||
|
|||
# 旧版本构建方式 |
|||
|
|||
FROM repository.anxinyun.cn/base-images/nodejs12:20.10.12.2 |
|||
|
|||
COPY . /var/app |
|||
|
|||
WORKDIR /var/app |
|||
|
|||
EXPOSE 8080 |
|||
|
|||
CMD ["-u", "http://localhost:8088"] |
|||
|
|||
ENTRYPOINT [ "node", "server.js" ] |
@ -0,0 +1,3 @@ |
|||
'use strict'; |
|||
|
|||
module.exports = require('./lib'); |
@ -0,0 +1,126 @@ |
|||
'use strict'; |
|||
|
|||
const fs = require('fs'); |
|||
const path = require('path'); |
|||
const utils = require('./utils') |
|||
const routes = require('./routes'); |
|||
const redisConnect = require('./service/redis') |
|||
const socketConnect = require('./service/socket')
|||
const mqttVideoServer = require('./service/mqttVideoServer') |
|||
const paasRequest = require('./service/paasRequest'); |
|||
const authenticator = require('./middlewares/authenticator'); |
|||
const schedule = require('./schedule') |
|||
// const apiLog = require('./middlewares/api-log');
|
|||
|
|||
module.exports.entry = function (app, router, opts) { |
|||
app.fs.logger.log('info', '[FS-AUTH]', 'Inject auth and api mv into router.'); |
|||
|
|||
app.fs.api = app.fs.api || {}; |
|||
app.fs.utils = app.fs.utils || {}; |
|||
app.fs.api.authAttr = app.fs.api.authAttr || {}; |
|||
app.fs.api.logAttr = app.fs.api.logAttr || {}; |
|||
|
|||
// The order below is fixed ↓
|
|||
redisConnect(app, opts) |
|||
socketConnect(app, opts)
|||
mqttVideoServer(app, opts) |
|||
|
|||
// Instantiate request helpers for the other platforms
|
|||
paasRequest(app, opts) |
|||
|
|||
// Utility functions
|
|||
utils(app, opts) |
|||
|
|||
// Scheduled tasks
|
|||
schedule(app, opts) |
|||
|
|||
// Authentication middleware
|
|||
router.use(authenticator(app, opts)); |
|||
|
|||
// API logging
|
|||
// router.use(apiLog(app, opts));
|
|||
|
|||
router = routes(app, router, opts); |
|||
}; |
|||
|
|||
module.exports.models = function (dc) { // dc = { orm: Sequelize instance, ORM: Sequelize class, models: {} }
|
|||
// Load model definitions; legacy per-file approach:
|
|||
// require('./models/nvr')(dc);
|
|||
|
|||
// Model associations are pulled out here: they can only be defined after all models are initialized
|
|||
fs.readdirSync(path.join(__dirname, '/models')).forEach((filename) => { |
|||
require(`./models/${filename}`)(dc) |
|||
}); |
|||
|
|||
const { |
|||
Nvr, Camera, CameraAbility, CameraAbilityBind, CameraKind, CameraRemark, |
|||
GbCamera, SecretYingshi, Vender, CameraStatus, CameraStatusResolve, CameraStatusLog, |
|||
CameraStatusPushConfig, CameraStatusPushMonitor, CameraStatusPushLog, CameraStatusPushReceiver, CameraStatusOfflineLog, |
|||
Mirror, MirrorTree, MirrorFilterGroup, MirrorFilter, MirrorCamera |
|||
} = dc.models; |
|||
|
|||
// Nvr.belongsTo(User, { foreignKey: 'userId', targetKey: 'id' });
|
|||
// User.hasMany(Nvr, { foreignKey: 'userId', sourceKey: 'id' });
|
|||
|
|||
Camera.belongsToMany(CameraAbility, { through: CameraAbilityBind, foreignKey: 'cameraId', otherKey: 'abilityId' }); |
|||
|
|||
CameraRemark.belongsTo(Camera, { foreignKey: 'cameraId', targetKey: 'id' }); |
|||
Camera.hasMany(CameraRemark, { foreignKey: 'cameraId', sourceKey: 'id' }); |
|||
|
|||
Camera.belongsTo(CameraKind, { foreignKey: 'kindId', targetKey: 'id' }); |
|||
CameraKind.hasMany(Camera, { foreignKey: 'kindId', sourceKey: 'id' }); |
|||
|
|||
Camera.belongsTo(Nvr, { foreignKey: 'nvrId', targetKey: 'id' }); |
|||
Nvr.hasMany(Camera, { foreignKey: 'nvrId', sourceKey: 'id' }); |
|||
|
|||
Nvr.belongsTo(GbCamera, { foreignKey: 'serialNo', targetKey: 'streamid', as: 'gbNvr' }); |
|||
GbCamera.hasMany(Nvr, { foreignKey: 'serialNo', sourceKey: 'streamid', as: 'gbNvr' }); |
|||
|
|||
Camera.belongsTo(GbCamera, { foreignKey: 'gbId', targetKey: 'id' }); |
|||
GbCamera.hasMany(Camera, { foreignKey: 'gbId', sourceKey: 'id' }); |
|||
|
|||
Camera.belongsTo(SecretYingshi, { foreignKey: 'yingshiSecretId', targetKey: 'id' }); |
|||
SecretYingshi.hasMany(Camera, { foreignKey: 'yingshiSecretId', sourceKey: 'id' }); |
|||
|
|||
Camera.belongsTo(Vender, { foreignKey: 'venderId', targetKey: 'id' }); |
|||
Vender.hasMany(Camera, { foreignKey: 'venderId', sourceKey: 'id' }); |
|||
|
|||
Nvr.belongsTo(Vender, { foreignKey: 'venderId', targetKey: 'id' }); |
|||
Vender.hasMany(Nvr, { foreignKey: 'venderId', sourceKey: 'id' }); |
|||
|
|||
CameraStatusResolve.belongsTo(CameraStatus, { foreignKey: 'statusId', targetKey: 'id' }); |
|||
CameraStatus.hasMany(CameraStatusResolve, { foreignKey: 'statusId', sourceKey: 'id' }); |
|||
|
|||
CameraStatusLog.belongsTo(CameraStatus, { foreignKey: 'statusId', targetKey: 'id' }); |
|||
CameraStatus.hasMany(CameraStatusLog, { foreignKey: 'statusId', sourceKey: 'id' }); |
|||
|
|||
CameraStatusPushMonitor.belongsTo(CameraStatusPushConfig, { foreignKey: 'configId', targetKey: 'id' }); |
|||
CameraStatusPushConfig.hasMany(CameraStatusPushMonitor, { foreignKey: 'configId', sourceKey: 'id' }); |
|||
|
|||
CameraStatusPushMonitor.belongsTo(Camera, { foreignKey: 'cameraId', targetKey: 'id' }); |
|||
Camera.hasMany(CameraStatusPushMonitor, { foreignKey: 'cameraId', sourceKey: 'id' }); |
|||
|
|||
CameraStatusPushLog.belongsTo(CameraStatusPushConfig, { foreignKey: 'pushConfigId', targetKey: 'id' }); |
|||
CameraStatusPushConfig.hasMany(CameraStatusPushLog, { foreignKey: 'pushConfigId', sourceKey: 'id' }); |
|||
|
|||
CameraStatusPushReceiver.belongsTo(CameraStatusPushConfig, { foreignKey: 'configId', targetKey: 'id' }); |
|||
CameraStatusPushConfig.hasMany(CameraStatusPushReceiver, { foreignKey: 'configId', sourceKey: 'id' }); |
|||
|
|||
CameraStatusOfflineLog.belongsTo(Camera, { foreignKey: 'cameraId', targetKey: 'id' }); |
|||
Camera.hasMany(CameraStatusOfflineLog, { foreignKey: 'cameraId', sourceKey: 'id' }); |
|||
|
|||
MirrorTree.belongsTo(Mirror, { foreignKey: 'mirrorId', targetKey: 'id' }); |
|||
Mirror.hasMany(MirrorTree, { foreignKey: 'mirrorId', sourceKey: 'id' }); |
|||
|
|||
MirrorFilterGroup.belongsTo(Mirror, { foreignKey: 'mirrorId', targetKey: 'id' }); |
|||
Mirror.hasMany(MirrorFilterGroup, { foreignKey: 'mirrorId', sourceKey: 'id' }); |
|||
|
|||
MirrorFilter.belongsTo(MirrorFilterGroup, { foreignKey: 'groupId', targetKey: 'id' }); |
|||
MirrorFilterGroup.hasMany(MirrorFilter, { foreignKey: 'groupId', sourceKey: 'id' }); |
|||
|
|||
MirrorCamera.belongsTo(Camera, { foreignKey: 'cameraId', targetKey: 'id' }); |
|||
Camera.hasMany(MirrorCamera, { foreignKey: 'cameraId', sourceKey: 'id' }); |
|||
|
|||
MirrorCamera.belongsTo(Mirror, { foreignKey: 'mirrorId', targetKey: 'id' }); |
|||
Mirror.hasMany(MirrorCamera, { foreignKey: 'mirrorId', sourceKey: 'id' }); |
|||
}; |
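// Illustrative sketch of the contract the readdirSync loop above relies on: every file in
// ./models exports a function of `dc` that defines and registers one model (the file name
// and attributes below are hypothetical; see ./models/application.js for a real example):
//
//   // models/example.js
//   module.exports = dc => {
//     const Example = dc.orm.define('example',
//       { id: { type: dc.ORM.INTEGER, primaryKey: true } },
//       { tableName: 'example' });
//     dc.models.Example = Example;
//     return Example;
//   };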
@ -0,0 +1,83 @@ |
|||
/** |
|||
* Created by PengPeng on 2017/4/26. |
|||
*/ |
|||
'use strict'; |
|||
|
|||
const moment = require('moment'); |
|||
const pathToRegexp = require('path-to-regexp'); |
|||
|
|||
function factory(app, opts) { |
|||
async function sendToEsAsync(producer, payloads) { |
|||
return new Promise((resolve, reject) => { |
|||
producer.send(payloads, function (err) { |
|||
if (err) { |
|||
reject(err); |
|||
} else { |
|||
resolve(); |
|||
} |
|||
}); |
|||
}) |
|||
} |
|||
|
|||
async function logger(ctx, next) { |
|||
const { path, method } = ctx; |
|||
const start = Date.now(); |
|||
|
|||
// 等待路由处理
|
|||
await next(); |
|||
|
|||
try { |
|||
let logAttr = null; |
|||
for (let prop in app.fs.api.logAttr) { |
|||
let keys = []; |
|||
let re = pathToRegexp(prop.replace(/\:[A-Za-z_\-]+\b/g, '(\\d+)'), keys); |
|||
if (re.test(`${method}${path}`)) { |
|||
logAttr = app.fs.api.logAttr[prop]; |
|||
break; |
|||
} |
|||
} |
|||
let parameter = null, parameterShow = null, user_id, _token, app_key; |
|||
if (ctx.fs.api) { |
|||
const { actionParameter, actionParameterShow, userId, token, appKey } = ctx.fs.api; |
|||
parameter = actionParameter; |
|||
parameterShow = actionParameterShow; |
|||
user_id = userId; |
|||
_token = token; |
|||
app_key = appKey; |
|||
} |
|||
const producer = ctx.fs.kafka.producer; |
|||
|
|||
const message = { |
|||
log_time: moment().toISOString(), |
|||
method: method, |
|||
content: logAttr ? logAttr.content : '', |
|||
parameter: parameter ? JSON.stringify(parameter) : JSON.stringify(ctx.request.body),
|||
parameter_show: parameterShow, |
|||
visible: logAttr ? logAttr.visible : true, |
|||
cost: Date.now() - start, |
|||
status_code: ctx.status, |
|||
url: ctx.request.url, |
|||
user_agent: ctx.request.headers["user-agent"], |
|||
user_id: user_id, |
|||
session: _token, |
|||
app_key: app_key, |
|||
header: JSON.stringify(ctx.request.headers), |
|||
ip: ctx.request.headers["x-real-ip"] || ctx.ip |
|||
}; |
|||
|
|||
const payloads = [{ |
|||
topic: `${opts.kafka.topicPrefix}`, |
|||
messages: [JSON.stringify(message)], |
|||
partition: 0 |
|||
}]; |
|||
|
|||
// await sendToEsAsync(producer, payloads);
|
|||
|
|||
} catch (e) { |
|||
ctx.fs.logger.error(`日志记录失败: ${e}`); |
|||
} |
|||
} |
|||
return logger; |
|||
} |
|||
|
|||
module.exports = factory; |
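// Usage sketch (hedged): the lib entry in this package currently keeps
// `router.use(apiLog(app, opts))` commented out, and the final send above is commented out
// as well. When enabled, `opts` is expected to carry the Kafka topic prefix, and a producer
// with a kafka-node style send(payloads, callback) is expected on ctx.fs.kafka.producer.
// The prefix value below is a placeholder:
//
//   const apiLog = require('./middlewares/api-log');
//   router.use(apiLog(app, { kafka: { topicPrefix: 'iot.video.api.log' } }));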
@ -0,0 +1,144 @@ |
|||
/** |
|||
* Created by PengLing on 2017/3/27. |
|||
*/ |
|||
'use strict'; |
|||
|
|||
const pathToRegexp = require('path-to-regexp'); |
|||
const util = require('util'); |
|||
const moment = require('moment'); |
|||
|
|||
class ExcludesUrls { |
|||
constructor(opts) { |
|||
this.allUrls = undefined; |
|||
this.reload(opts); |
|||
} |
|||
|
|||
sanitizePath (path) { |
|||
if (!path) return '/'; |
|||
const p = '/' + path.replace(/^\/+/i, '').replace(/\/+$/, '').replace(/\/{2,}/, '/'); |
|||
return p; |
|||
} |
|||
|
|||
reload (opts) { |
|||
// load all url
|
|||
if (!this.allUrls) { |
|||
this.allUrls = opts; |
|||
let that = this; |
|||
this.allUrls.forEach(function (url, i, arr) { |
|||
if (typeof url === "string") { |
|||
url = { p: url, o: '*' }; |
|||
arr[i] = url; |
|||
} |
|||
const keys = []; |
|||
let eachPath = url.p; |
|||
url.p = (!eachPath || eachPath === '(.*)' || util.isRegExp(eachPath)) ? eachPath : that.sanitizePath(eachPath); |
|||
url.pregexp = pathToRegexp(eachPath, keys); |
|||
}); |
|||
} |
|||
} |
|||
|
|||
isExcluded (path, method) { |
|||
return this.allUrls.some(function (url) { |
|||
return !url.auth |
|||
&& url.pregexp.test(path) |
|||
&& (url.o === '*' || url.o.indexOf(method) !== -1); |
|||
}); |
|||
} |
|||
} |
|||
|
|||
/** |
|||
* Whether the URL is exempt from authentication
* @param {*} opts {exclude: [*] or []}: '*' or ['*'] skips every route; [] means every route is authenticated
* @param {*} path path of the current request
* @param {*} method method of the current request
|||
*/ |
|||
let isPathExcluded = function (opts, path, method) { |
|||
let excludeAll = Boolean(opts.exclude && opts.exclude.length && opts.exclude[0] == '*'); |
|||
let excludes = null; |
|||
if (!excludeAll) { |
|||
let excludeOpts = opts.exclude || []; |
|||
excludeOpts.push({ p: '/login', o: 'POST' }); |
|||
excludeOpts.push({ p: '/logout', o: 'PUT' }); |
|||
excludes = new ExcludesUrls(excludeOpts); |
|||
} |
|||
let excluded = excludeAll || excludes.isExcluded(path, method); |
|||
return excluded; |
|||
}; |
|||
|
|||
let authorizeToken = async function (ctx, token) { |
|||
let rslt = null; |
|||
const tokenFormatRegexp = /^(\{{0,1}([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}\}{0,1})$/g; |
|||
if (token && tokenFormatRegexp.test(token)) { |
|||
try { |
|||
const expired = await ctx.redis.hget(token, 'expired'); |
|||
|
|||
if (expired && moment().valueOf() <= moment(expired).valueOf()) { |
|||
const userInfo = JSON.parse(await ctx.redis.hget(token, 'userInfo')); |
|||
rslt = { |
|||
'authorized': userInfo.authorized, |
|||
'resources': (userInfo || {}).resources || [], |
|||
}; |
|||
ctx.fs.api.userId = userInfo.id; |
|||
ctx.fs.api.userInfo = userInfo; |
|||
ctx.fs.api.token = token; |
|||
} |
|||
} catch (err) { |
|||
const { error } = err.response || {}; |
|||
ctx.fs.logger.log('[anxinyun]', '[AUTH] failed', (error || {}).message || `cannot GET /users/${token}`); |
|||
} |
|||
} |
|||
return rslt; |
|||
}; |
|||
|
|||
let isResourceAvailable = function (resources, options) { |
|||
let authCode = null; |
|||
// authorize user by authorization attribute
|
|||
const { authAttr, method, path } = options; |
|||
for (let prop in authAttr) { |
|||
let keys = []; |
|||
let re = pathToRegexp(prop.replace(/\:[A-Za-z_\-]+\b/g, '(\\d+)'), keys); |
|||
if (re.test(`${method}${path}`)) { |
|||
authCode = authAttr[prop]; |
|||
break; |
|||
} |
|||
} |
|||
return !authCode || (resources || []).some(code => code === authCode); |
|||
}; |
|||
|
|||
function factory (app, opts) { |
|||
return async function auth (ctx, next) { |
|||
const { path, method, header, query } = ctx; |
|||
ctx.fs.logger.log('[AUTH] start', path, method); |
|||
ctx.fs.api = ctx.fs.api || {}; |
|||
ctx.fs.port = opts.port; |
|||
ctx.redis = app.redis; |
|||
ctx.redisTools = app.redisTools; |
|||
let error = null; |
|||
if (path) { |
|||
if (!isPathExcluded(opts, path, method)) { |
|||
const user = await authorizeToken(ctx, header.token || query.token); |
|||
if (user && user.authorized) { |
|||
// if (!isResourceAvailable(user.resources, { authAttr: app.fs.auth.authAttr, path, method })) {
|
|||
// error = { status: 403, name: 'Forbidden' }
|
|||
// } else {
|
|||
// error = { status: 401, name: 'Unauthorized' }
|
|||
// }
|
|||
} else { |
|||
error = { status: 401, name: 'Unauthorized' } |
|||
} |
|||
} |
|||
} else { |
|||
error = { status: 401, name: 'Unauthorized' }; |
|||
} |
|||
if (error) { |
|||
ctx.fs.logger.log('[AUTH] failed', path, method); |
|||
ctx.status = error.status; |
|||
ctx.body = error.name; |
|||
} else { |
|||
ctx.fs.logger.log('[AUTH] passed', path, method); |
|||
await next(); |
|||
} |
|||
} |
|||
} |
|||
|
|||
module.exports = factory; |
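// Sketch of the Redis session entry authorizeToken expects (inferred from the hget calls
// above; the userInfo fields beyond id/authorized/resources, the resource code and the TTL
// are illustrative assumptions):
//
//   await app.redis.hset(token, 'expired', moment().add(8, 'hours').toISOString());
//   await app.redis.hset(token, 'userInfo', JSON.stringify({
//     id: 1,
//     authorized: true,
//     resources: ['CAMERA_MANAGE']
//   }));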
@ -0,0 +1,88 @@ |
|||
/* eslint-disable*/ |
|||
'use strict'; |
|||
|
|||
module.exports = dc => { |
|||
const DataTypes = dc.ORM; |
|||
const sequelize = dc.orm; |
|||
const Application = sequelize.define("application", { |
|||
id: { |
|||
type: DataTypes.INTEGER, |
|||
allowNull: false, |
|||
defaultValue: null, |
|||
comment: null, |
|||
primaryKey: true, |
|||
field: "id", |
|||
autoIncrement: true, |
|||
unique: "application_id_uindex" |
|||
}, |
|||
name: { |
|||
type: DataTypes.STRING, |
|||
allowNull: false, |
|||
defaultValue: null, |
|||
comment: null, |
|||
primaryKey: false, |
|||
field: "name", |
|||
autoIncrement: false |
|||
}, |
|||
type: { |
|||
type: DataTypes.ARRAY(DataTypes.STRING), |
|||
allowNull: true, |
|||
defaultValue: null, |
|||
comment: "web / app / wxapp / other", |
|||
primaryKey: false, |
|||
field: "type", |
|||
autoIncrement: false |
|||
}, |
|||
appKey: { |
|||
type: DataTypes.STRING, |
|||
allowNull: false, |
|||
defaultValue: null, |
|||
comment: null, |
|||
primaryKey: false, |
|||
field: "app_key", |
|||
autoIncrement: false |
|||
}, |
|||
appSecret: { |
|||
type: DataTypes.STRING, |
|||
allowNull: false, |
|||
defaultValue: null, |
|||
comment: null, |
|||
primaryKey: false, |
|||
field: "app_secret", |
|||
autoIncrement: false |
|||
}, |
|||
createUserId: { |
|||
type: DataTypes.INTEGER, |
|||
allowNull: false, |
|||
defaultValue: null, |
|||
comment: null, |
|||
primaryKey: false, |
|||
field: "create_user_id", |
|||
autoIncrement: false |
|||
}, |
|||
createTime: { |
|||
type: DataTypes.DATE, |
|||
allowNull: false, |
|||
defaultValue: null, |
|||
comment: null, |
|||
primaryKey: false, |
|||
field: "create_time", |
|||
autoIncrement: false |
|||
}, |
|||
forbidden: { |
|||
type: DataTypes.BOOLEAN, |
|||
allowNull: false, |
|||
defaultValue: null, |
|||
comment: null, |
|||
primaryKey: false, |
|||
field: "forbidden", |
|||
autoIncrement: false |
|||
} |
|||
}, { |
|||
tableName: "application", |
|||
comment: "", |
|||
indexes: [] |
|||
}); |
|||
dc.models.Application = Application; |
|||
return Application; |
|||
}; |
@ -0,0 +1,35 @@ |
|||
/* eslint-disable*/ |
|||
'use strict'; |
|||
|
|||
module.exports = dc => { |
|||
const DataTypes = dc.ORM; |
|||
const sequelize = dc.orm; |
|||
const AxProject = sequelize.define("axProject", { |
|||
id: { |
|||
type: DataTypes.INTEGER, |
|||
allowNull: false, |
|||
defaultValue: null, |
|||
comment: null, |
|||
primaryKey: true, |
|||
field: "id", |
|||
autoIncrement: false, |
|||
unique: "ax_project_id_uindex" |
|||
}, |
|||
name: { |
|||
type: DataTypes.STRING, |
|||
allowNull: false, |
|||
defaultValue: null, |
|||
comment: null, |
|||
primaryKey: false, |
|||
field: "name", |
|||
autoIncrement: false |
|||
} |
|||
}, { |
|||
tableName: "ax_project", |
|||
comment: "", |
|||
indexes: [] |
|||
}); |
|||
|
|||
dc.models.AxProject = AxProject; |
|||
return AxProject; |
|||
}; |
@ -0,0 +1,17 @@ |
|||
'use strict'; |
|||
|
|||
const path = require('path'); |
|||
const fs = require('fs'); |
|||
|
|||
module.exports = function (app, router, opts) { |
|||
fs.readdirSync(__dirname).forEach((filename) => { |
|||
if (filename.indexOf('.') !== 0 && fs.lstatSync(path.join(__dirname, filename)).isDirectory()) { |
|||
fs.readdirSync(path.join(__dirname, filename)).forEach((api) => { |
|||
if (api.indexOf('.') == 0 || api.indexOf('.js') == -1) return; |
|||
require(`./${filename}/${api}`)(app, router, opts); |
|||
}); |
|||
} |
|||
}); |
|||
|
|||
return router; |
|||
}; |
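// Illustrative sketch of a route module this loader picks up from a sub-directory (the
// path, logAttr text and handler below are assumptions for the example, not actual routes
// of this project):
//
//   // <this directory>/camera/index.js
//   module.exports = function (app, router, opts) {
//     app.fs.api.logAttr['GET/cameras'] = { content: '查询摄像头列表', visible: true };
//     router.get('/cameras', async (ctx) => {
//       ctx.status = 200;
//       ctx.body = await app.fs.dc.models.Camera.findAll();
//     });
//   };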
@ -0,0 +1,40 @@ |
|||
'use strict'; |
|||
const mqtt = require('mqtt'); |
|||
|
|||
module.exports = async function factory (app, opts) { |
|||
console.info(`mqtt connecting ${opts.mqtt.mqttVideoServer}`); |
|||
|
|||
const client = mqtt.connect(opts.mqtt.mqttVideoServer); |
|||
|
|||
client.on('connect', function () { |
|||
console.info(`mqtt connect success ${opts.mqtt.mqttVideoServer}`); |
|||
client.subscribe('topic/test', { qos: 0 }); // subscribe to the 'topic/test' topic
|
|||
}) |
|||
client.on('error', function (e) { |
|||
console.error(`mqtt connect failed ${opts.mqtt.mqttVideoServer}`); |
|||
app.fs.logger.error('info', '[FS-AUTH-MQTT]', `mqtt connect failed ${opts.mqtt.mqttVideoServer}`); |
|||
}) |
|||
|
|||
client.on('message', async (top, message) => { |
|||
let msgStr = message.toString(); |
|||
let msg = JSON.parse(msgStr.replace(/\\/g, '')); |
|||
if (msg.id && msg.online) { |
|||
const { cameraStatePush } = app.fs.utils |
|||
const { models } = app.fs.dc |
|||
const gbCameraRes = await models.GbCamera.findOne({ |
|||
where: { |
|||
id: msg.id |
|||
} |
|||
}) |
|||
if (gbCameraRes) { |
|||
cameraStatePush({ |
|||
gbId: msg.id, |
|||
online: msg.online, |
|||
ipctype: gbCameraRes.ipctype, |
|||
}) |
|||
} |
|||
} |
|||
}); |
|||
|
|||
app.mqttVideoServer = client |
|||
} |
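// Publishing sketch for local testing (the broker URL is the one used in the debug launch
// config; the payload values are assumptions, and the handler above only acts when both
// `id` and `online` are present):
//
//   const mqtt = require('mqtt');
//   const testClient = mqtt.connect('mqtt://10.8.30.71:30883');
//   testClient.on('connect', () => {
//     testClient.publish('topic/test', JSON.stringify({ id: 123, online: 'ON' }));
//   });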
@ -0,0 +1,67 @@ |
|||
'use strict'; |
|||
const request = require('superagent') |
|||
|
|||
class paasRequest { |
|||
constructor(root, { query = {} } = {}, option) { |
|||
this.root = root; |
|||
this.query = query |
|||
this.option = option |
|||
} |
|||
|
|||
#buildUrl = (url) => { |
|||
return `${this.root}/${url}`; |
|||
} |
|||
|
|||
#resultHandler = (resolve, reject) => { |
|||
return (err, res) => { |
|||
if (err) { |
|||
reject(err); |
|||
} else { |
|||
resolve(res[this.option.dataWord]); |
|||
} |
|||
}; |
|||
} |
|||
|
|||
get = (url, { query = {}, header = {} } = {}) => { |
|||
return new Promise((resolve, reject) => { |
|||
request.get(this.#buildUrl(url)).set(header).query(Object.assign(query, this.query)).end(this.#resultHandler(resolve, reject)); |
|||
}) |
|||
} |
|||
|
|||
post = (url, { data = {}, query = {}, header = {} } = {}) => { |
|||
return new Promise((resolve, reject) => { |
|||
request.post(this.#buildUrl(url)).set(header).query(Object.assign(query, this.query)).send(data).end(this.#resultHandler(resolve, reject)); |
|||
}) |
|||
} |
|||
|
|||
put = (url, { data = {}, header = {}, query = {}, } = {}) => { |
|||
return new Promise((resolve, reject) => { |
|||
request.put(this.#buildUrl(url)).set(header).query(Object.assign(query, this.query)).send(data).end(this.#resultHandler(resolve, reject)); |
|||
}) |
|||
} |
|||
|
|||
delete = (url, { header = {}, query = {} } = {}) => { |
|||
return new Promise((resolve, reject) => { |
|||
request.delete(this.#buildUrl(url)).set(header).query(Object.assign(query, this.query)).end(this.#resultHandler(resolve, reject)); |
|||
}) |
|||
} |
|||
} |
|||
|
|||
function factory (app, opts) { |
|||
if (opts.pssaRequest) { |
|||
try { |
|||
for (let r of opts.pssaRequest) { |
|||
if (r.name && r.root) { |
|||
app.fs[r.name] = new paasRequest(r.root, { ...(r.params || {}) }, { dataWord: r.dataWord || 'body' }) |
|||
} else { |
|||
throw 'opts.pssaRequest 参数错误!' |
|||
} |
|||
} |
|||
} catch (error) { |
|||
console.error(error) |
|||
process.exit(-1); |
|||
} |
|||
} |
|||
} |
|||
|
|||
module.exports = factory; |
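// Usage sketch: the factory registers one instance per entry of opts.pssaRequest (see the
// pssaRequest list in config.js), exposed as app.fs.<name>. The endpoint, query and header
// below are hypothetical examples, not documented APIs of those services:
//
//   const res = await app.fs.axyRequest.get('some/endpoint', {
//     query: { pageSize: 10 },
//     header: { token: '<session-token>' }
//   });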
@ -0,0 +1,41 @@ |
|||
'use strict'; |
|||
// https://github.com/luin/ioredis
|
|||
const redis = require("ioredis") |
|||
|
|||
module.exports = async function factory (app, opts) { |
|||
let client = opts.redis.pwd ? |
|||
new redis.Cluster([ |
|||
{ |
|||
host: opts.redis.host, |
|||
port: opts.redis.port |
|||
} |
|||
], { |
|||
redisOptions: { |
|||
password: opts.redis.pwd, |
|||
}, |
|||
}) |
|||
: new redis(opts.redis.port, opts.redis.host, { |
|||
password: opts.redis.pwd, |
|||
}); |
|||
|
|||
client.on("error", function (err) { |
|||
app.fs.logger.error('info', '[FS-AUTH-REDIS]', `redis connect error. ${opts.redis.host + ':' + opts.redis.port}`); |
|||
// console.error("Error :", err);
|
|||
// process.exit(-1);
|
|||
}); |
|||
|
|||
client.on('connect', function () { |
|||
console.info(`redis connect success ${opts.redis.host + ':' + opts.redis.port}`); |
|||
}) |
|||
|
|||
// Custom helper methods
|
|||
async function hdelall (key) { |
|||
const obj = await client.hgetall(key); |
|||
await client.hdel(key, Object.keys(obj)) |
|||
} |
|||
|
|||
app.redis = client |
|||
app.redisTools = { |
|||
hdelall, |
|||
} |
|||
} |
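// Usage sketch (the key name is an illustrative assumption):
//
//   await app.redis.hset('some-token', 'expired', '2022-01-01T00:00:00.000Z');
//   await app.redisTools.hdelall('some-token'); // deletes every field of the hash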
@ -0,0 +1,33 @@ |
|||
'use strict'; |
|||
|
|||
module.exports = async function factory (app, opts) { |
|||
|
|||
app.socket.on('connection', async (socket) => { |
|||
console.info('WEB_SOCKET ' + socket.handshake.query.token + ' 已连接:' + socket.id); |
|||
socket.on('disconnecting', async (reason) => { |
|||
console.info('WEB_SOCKET ' + socket.handshake.query.token + ' 已断开连接:' + reason); |
|||
}) |
|||
}) |
|||
|
|||
// Usage test: keep the connection alive
|
|||
setInterval(async () => { |
|||
const { connected } = app.socket.sockets |
|||
|
|||
const roomId = 'ROOM_' + Math.random() |
|||
// if (connected) {
|
|||
// for (let c in connected) {
|
|||
// connected[c].join(roomId)
|
|||
// }
|
|||
// app.socket.to(roomId).emit('TEST', { someProperty: `【星域 ROOM:${roomId}】呼叫自然选择号!!!`, })
|
|||
// }
|
|||
|
|||
app.socket.emit('TEST', { someProperty: '【广播】呼叫青铜时代号!!!', }) |
|||
|
|||
// app.socket.emit('CAMERA_ONLINE', {
|
|||
// ipctype: 'yingshi',
|
|||
// online: Math.random() > 0.5 ? 'ON' : 'OFF',
|
|||
// gbId: Math.floor(Math.random() * 100),
|
|||
// name: 'cameraName'
|
|||
// })
|
|||
}, 3000) |
|||
} |
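// Client-side sketch (assumption: app.socket is a socket.io server, which the
// connection/handshake/emit usage above suggests; the URL and token are placeholders):
//
//   const io = require('socket.io-client');
//   const socket = io('http://localhost:4000', { query: { token: '<session-token>' } });
//   socket.on('TEST', data => console.log(data.someProperty));
//   socket.on('CAMERA_ONLINE', data => console.log(data.gbId, data.online));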
@ -0,0 +1,16 @@ |
|||
'use strict'; |
|||
|
|||
const path = require('path'); |
|||
const fs = require('fs'); |
|||
|
|||
module.exports = async function (app, opts) { |
|||
fs.readdirSync(__dirname).forEach((filename) => { |
|||
if (!['index.js'].some(f => filename == f)) { |
|||
const utils = require(`./${filename}`)(app, opts) |
|||
app.fs.utils = { |
|||
...app.fs.utils, |
|||
...utils, |
|||
} |
|||
} |
|||
}); |
|||
}; |
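// Contract sketch inferred from the loader above: every other file in this directory
// exports a function of (app, opts) returning an object of named helpers, which are merged
// onto app.fs.utils (the file name and helper below are hypothetical):
//
//   // <this directory>/ping.js
//   module.exports = function (app, opts) {
//     async function ping () { return 'pong'; }
//     return { ping };
//   };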
@ -0,0 +1,54 @@ |
|||
const fs = require('fs'); |
|||
|
|||
module.exports = function (app, opts) { |
|||
async function oauthParseAuthHeader (auth) { |
|||
if (!auth) { |
|||
throw new Error('参数无效: 未包含Authorization头'); |
|||
} |
|||
|
|||
const authSplit = auth.split('Basic'); |
|||
if (authSplit.length != 2) { |
|||
throw new Error('参数无效: Authorization头格式无效,请检查是否包含了"Basic "'); |
|||
} |
|||
|
|||
const authCode = authSplit[1]; |
|||
const apikey = Buffer.from(authCode, 'base64').toString(); |
|||
|
|||
const keySplit = apikey.split(':'); |
|||
if (keySplit.length != 2) { |
|||
throw new Error('参数无效:请检查Authorization头内容是否经过正确Base64编码'); |
|||
} |
|||
|
|||
return keySplit; |
|||
} |
|||
|
|||
async function oauthParseBody (body, type) { |
|||
let checked = true, token = ''; |
|||
if (type == 'apply' && body['grant_type'] != 'client_credentials') { |
|||
checked = false; |
|||
} else if (type == 'refresh') { |
|||
if (body['grant_type'] != 'refresh_token' || body['token'] == null) { |
|||
checked = false; |
|||
} else { |
|||
token = body['token']; |
|||
} |
|||
} else if (type == 'invalidate') { |
|||
if (body['token'] == null) { |
|||
checked = false; |
|||
} else { |
|||
token = body['token']; |
|||
} |
|||
} |
|||
|
|||
if (!checked) { |
|||
throw new Error('参数无效:请求正文中未包含正确的信息'); |
|||
} |
|||
|
|||
return token; |
|||
} |
|||
|
|||
return { |
|||
oauthParseAuthHeader, |
|||
oauthParseBody |
|||
} |
|||
} |
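// Sketch of an Authorization header oauthParseAuthHeader accepts (the key/secret values
// are placeholders): base64-encode "appKey:appSecret" and prefix it with "Basic ".
//
//   const credential = Buffer.from('myAppKey:myAppSecret').toString('base64');
//   const authorization = `Basic ${credential}`;
//   // await oauthParseAuthHeader(authorization) resolves to ['myAppKey', 'myAppSecret']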
@ -0,0 +1,82 @@ |
|||
'use strict'; |
|||
const fs = require('fs'); |
|||
const xlsx = require('better-xlsx'); |
|||
const path = require('path') |
|||
const moment = require('moment') |
|||
|
|||
|
|||
module.exports = function (app, opts) { |
|||
|
|||
// Recursively create a directory (synchronous)
|
|||
async function makeDir (dir) { |
|||
if (!fs.existsSync(dir)) { |
|||
makeDir(path.dirname(dir)) |
|||
fs.mkdirSync(dir); // mkdirSync is synchronous and throws on failure; it does not take a callback
|||
} |
|||
} |
|||
|
|||
async function simpleExcelDown ({ data = [], header = [], fileName = moment().format('YYYY-MM-DD HH:mm:ss') } = {}) { |
|||
const fileDirPath = path.join(__dirname, `../../downloadFiles`) |
|||
makeDir(fileDirPath) |
|||
const file = new xlsx.File(); |
|||
const sheet_1 = file.addSheet('sheet_1'); |
|||
|
|||
// header
|
|||
const headerStyle = new xlsx.Style(); |
|||
headerStyle.align.h = 'center'; |
|||
headerStyle.align.v = 'center'; |
|||
headerStyle.border.right = 'thin'; |
|||
headerStyle.border.rightColor = '#000000'; |
|||
headerStyle.border.bottom = 'thin'; |
|||
headerStyle.border.bottomColor = '#000000'; |
|||
|
|||
const headerRow = sheet_1.addRow(); |
|||
const indexCell = headerRow.addCell(); |
|||
indexCell.value = '序号' |
|||
indexCell.style = headerStyle |
|||
for (let h of header) { |
|||
const cell = headerRow.addCell(); |
|||
cell.value = h.title; |
|||
cell.style = headerStyle |
|||
} |
|||
|
|||
// data
|
|||
const style = new xlsx.Style(); |
|||
style.align.h = 'left'; |
|||
style.align.v = 'center'; |
|||
style.border.right = 'thin'; |
|||
style.border.rightColor = '#000000'; |
|||
style.border.bottom = 'thin'; |
|||
style.border.bottomColor = '#000000'; |
|||
for (let i = 0; i < data.length; i++) { |
|||
const row = sheet_1.addRow(); |
|||
const indexCell = row.addCell(); |
|||
indexCell.value = i + 1 |
|||
indexCell.style = headerStyle |
|||
for (let h of header) { |
|||
const cell = row.addCell(); |
|||
cell.value = data[i][h.key]; |
|||
cell.style = style |
|||
} |
|||
} |
|||
|
|||
const savePath = path.join(fileDirPath, fileName) |
|||
await new Promise(function (resolve, reject) { |
|||
file.saveAs() |
|||
.pipe(fs.createWriteStream(savePath)) |
|||
.on('finish', () => { |
|||
resolve() |
|||
}); |
|||
}) |
|||
return savePath |
|||
} |
|||
|
|||
return { |
|||
simpleExcelDown, |
|||
makeDir |
|||
} |
|||
} |
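// Usage sketch in a route handler (the field names, values and file name are assumptions
// for the example):
//
//   const savePath = await app.fs.utils.simpleExcelDown({
//     fileName: 'camera-status.xlsx',
//     header: [{ title: '设备名称', key: 'name' }, { title: '在线状态', key: 'online' }],
//     data: [{ name: 'camera-01', online: 'ON' }]
//   });
//   // savePath points into the downloadFiles directory created by makeDir and can then be
//   // streamed back to the client.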
@ -0,0 +1,216 @@ |
|||
'use strict'; |
|||
/*jslint node:true*/ |
|||
const path = require('path'); |
|||
const os = require('os'); |
|||
const moment = require('moment'); |
|||
const args = require('args'); |
|||
|
|||
const dev = process.env.NODE_ENV == 'development'; |
|||
|
|||
// 启动参数
|
|||
args.option(['p', 'port'], '启动端口'); |
|||
args.option(['g', 'pg'], 'postgre服务URL'); |
|||
args.option(['f', 'fileHost'], '文件中心本地化存储: WebApi 服务器地址(必填), 该服务器提供文件上传Web服务'); |
|||
|
|||
args.option('redisHost', 'redisHost'); |
|||
args.option('redisPort', 'redisPort'); |
|||
args.option('redisPswd', 'redisPassword'); |
|||
|
|||
args.option('axyApiUrl', '安心云 api'); |
|||
args.option('iotAuthApi', 'IOT 鉴权 api'); |
|||
|
|||
args.option('godUrl', '高德地图API请求地址'); |
|||
args.option('godKey', '高德地图API key'); |
|||
|
|||
args.option('iotVideoServerUrl', '视频后端服务地址'); |
|||
args.option('mqttVideoServer', '视频后台 mqtt 服务 URL'); |
|||
args.option('cameraPlayWsHost', '视频播放地址 ws://xxx:xxx'); |
|||
args.option('cameraPlayHttpFlvHost', '视频播放地址 httpFlv'); |
|||
args.option('cameraPlayHlsHost', '视频播放地址 hls'); |
|||
args.option('cameraPlayRtmpHost', '视频播放地址 rtmp'); |
|||
args.option('cameraPlayRtspHost', '视频播放地址 rtsp'); |
|||
|
|||
const flags = args.parse(process.argv); |
|||
|
|||
const IOT_VIDEO_ACCESS_DB = process.env.IOT_VIDEO_ACCESS_DB || flags.pg; |
|||
const IOT_VIDEO_ACCESS_LOCAL_SVR_ORIGIN = process.env.IOT_VIDEO_ACCESS_LOCAL_SVR_ORIGIN || flags.fileHost; |
|||
|
|||
// Redis 参数
|
|||
const IOTA_REDIS_SERVER_HOST = process.env.IOTA_REDIS_SERVER_HOST || flags.redisHost || "localhost";//redis IP
|
|||
const IOTA_REDIS_SERVER_PORT = process.env.IOTA_REDIS_SERVER_PORT || flags.redisPort || "6379";//redis 端口
|
|||
const IOTA_REDIS_SERVER_PWD = process.env.IOTA_REDIS_SERVER_PWD || flags.redisPswd || "";//redis 密码
|
|||
|
|||
// 鉴权 api
|
|||
const IOT_AUTH_API = process.env.IOT_AUTH_API || flags.iotAuthApi; |
|||
// 安心云api
|
|||
const AXY_API_URL = process.env.AXY_API_URL || flags.axyApiUrl; |
|||
|
|||
// 高德地图的参数
|
|||
const GOD_URL = process.env.GOD_URL || flags.godUrl || 'https://restapi.amap.com/v3'; |
|||
const GOD_KEY = process.env.GOD_KEY || flags.godKey; |
|||
|
|||
// 萤石服务的地址
|
|||
const YINGSHI_URL = process.env.YINGSHI_URL || flags.yingshiUrl || 'https://open.ys7.com/api'; |
|||
|
|||
// 视频后台服务的地址
|
|||
const IOT_VIDEO_SERVER_URL = process.env.IOT_VIDEO_SERVER_URL || flags.iotVideoServerUrl |
|||
// 视频后台 mqtt 信息推送地址
|
|||
const MQTT_VIDEO_SERVER = process.env.MQTT_VIDEO_SERVER || flags.mqttVideoServer; |
|||
|
|||
// 几种国标摄像头播放地址前缀
|
|||
const CAMERA_PLAY_WS_HOST = process.env.CAMERA_PLAY_WS_HOST || flags.cameraPlayWsHost; |
|||
const CAMERA_PLAY_HTTP_FLV_HOST = process.env.CAMERA_PLAY_HTTP_FLV_HOST || flags.cameraPlayHttpFlvHost; |
|||
const CAMERA_PLAY_HLS_HOST = process.env.CAMERA_PLAY_HLS_HOST || flags.cameraPlayHlsHost; |
|||
const CAMERA_PLAY_RTMP_HOST = process.env.CAMERA_PLAY_RTMP_HOST || flags.cameraPlayRtmpHost; |
|||
const CAMERA_PLAY_RTSP_HOST = process.env.CAMERA_PLAY_RTSP_HOST || flags.cameraPlayRtspHost; |
|||
|
|||
if (!IOT_VIDEO_ACCESS_DB || !IOTA_REDIS_SERVER_HOST || !IOTA_REDIS_SERVER_PORT || !GOD_KEY || !MQTT_VIDEO_SERVER || |
|||
!AXY_API_URL || !IOT_VIDEO_SERVER_URL || !IOT_AUTH_API || |
|||
!CAMERA_PLAY_WS_HOST || !CAMERA_PLAY_HTTP_FLV_HOST || !CAMERA_PLAY_HLS_HOST || !CAMERA_PLAY_RTMP_HOST || !CAMERA_PLAY_RTSP_HOST |
|||
) { |
|||
console.log('缺少启动参数,异常退出'); |
|||
args.showHelp(); |
|||
process.exit(-1); |
|||
} |
|||
|
|||
const product = { |
|||
port: flags.port || 8080, |
|||
staticDirs: ['static'], |
|||
mws: [ |
|||
{ |
|||
entry: require('@fs/attachment').entry, |
|||
opts: { |
|||
local: { |
|||
origin: IOT_VIDEO_ACCESS_LOCAL_SVR_ORIGIN || `http://localhost:${flags.port || 8080}`, |
|||
rootPath: 'static', |
|||
childPath: 'upload', |
|||
}, |
|||
maxSize: 104857600, // 100M
|
|||
} |
|||
}, { |
|||
entry: require('./app').entry, |
|||
opts: { |
|||
dev, |
|||
exclude: [ |
|||
{ p: '/camera', o: 'GET' }, // 暂时滴
|
|||
{ p: '/application/check', o: 'GET' }, // 暂时滴
|
|||
], // 不做认证的路由,也可以使用 exclude: ["*"] 跳过所有路由
|
|||
redis: { |
|||
host: IOTA_REDIS_SERVER_HOST, |
|||
port: IOTA_REDIS_SERVER_PORT, |
|||
pwd: IOTA_REDIS_SERVER_PWD |
|||
}, |
|||
mqtt: { |
|||
mqttVideoServer: MQTT_VIDEO_SERVER, |
|||
}, |
|||
cameraPlayHost: { |
|||
ws: CAMERA_PLAY_WS_HOST, |
|||
httpFlv: CAMERA_PLAY_HTTP_FLV_HOST, |
|||
hls: CAMERA_PLAY_HLS_HOST, |
|||
rtmp: CAMERA_PLAY_RTMP_HOST, |
|||
rtsp: CAMERA_PLAY_RTSP_HOST, |
|||
}, |
|||
sms: { |
|||
///阿里云-安心云
|
|||
accessKey: 'LTAI5tAFdjz7j38aNF2C9Qe8', |
|||
accessSecret: '1trYkmiqfBtvZL6BxkNH2uQcQQPs0S' |
|||
}, |
|||
email: { |
|||
enabled: true, |
|||
host: 'smtp.exmail.qq.com', |
|||
port: 465, |
|||
sender: { |
|||
name: '尚视', |
|||
address: 'fsiot@free-sun.com.cn', |
|||
password: 'Fs2689' |
|||
} |
|||
}, |
|||
pssaRequest: [ |
|||
{// name 会作为一个 request 出现在 ctx.app.fs
|
|||
name: 'authRequest', |
|||
root: IOT_AUTH_API |
|||
}, { |
|||
name: 'axyRequest', |
|||
root: AXY_API_URL |
|||
}, { |
|||
name: 'godRequest', |
|||
root: GOD_URL, |
|||
params: { |
|||
query: { |
|||
key: GOD_KEY |
|||
} |
|||
} |
|||
}, { |
|||
name: 'yingshiRequest', |
|||
root: YINGSHI_URL, |
|||
params: { |
|||
query: { |
|||
key: GOD_KEY |
|||
} |
|||
} |
|||
}, { |
|||
name: 'videoServerRequest', |
|||
root: IOT_VIDEO_SERVER_URL + '/api', |
|||
dataWord: 'text' |
|||
}, |
|||
] |
|||
} |
|||
} |
|||
], |
|||
dc: { |
|||
url: IOT_VIDEO_ACCESS_DB, |
|||
opts: { |
|||
pool: { |
|||
max: 80, |
|||
min: 10, |
|||
idle: 10000 |
|||
}, |
|||
define: { |
|||
freezeTableName: true, // 固定表名
|
|||
timestamps: false // no "createdAt"/"updatedAt"/"deletedAt" columns
|
|||
}, |
|||
timezone: '+08:00', |
|||
logging: false |
|||
}, |
|||
models: [require('./app').models] |
|||
}, |
|||
logger: { |
|||
level: 'info', |
|||
json: false, |
|||
filename: path.join(__dirname, 'log', 'runtime.log'), |
|||
colorize: false, |
|||
maxsize: 1024 * 1024 * 5, |
|||
rotationFormat: false, |
|||
zippedArchive: true, |
|||
maxFiles: 10, |
|||
prettyPrint: true, |
|||
label: '', |
|||
timestamp: () => moment().format('YYYY-MM-DD HH:mm:ss.SSS'), |
|||
eol: os.EOL, |
|||
tailable: true, |
|||
depth: null, |
|||
showLevel: true, |
|||
maxRetries: 1 |
|||
} |
|||
}; |
|||
|
|||
const development = { |
|||
port: product.port, |
|||
staticDirs: product.staticDirs, |
|||
mws: product.mws, |
|||
dc: product.dc, |
|||
logger: product.logger |
|||
}; |
|||
|
|||
if (dev) { |
|||
// mws
|
|||
for (let mw of development.mws) { |
|||
// if (mw.opts.exclude) mw.opts.exclude = ['*']; // 使用 ['*'] 跳过所有路由
|
|||
} |
|||
// logger
|
|||
development.logger.filename = path.join(__dirname, 'log', 'development.log'); |
|||
development.logger.level = 'debug'; |
|||
development.dc.opts.logging = console.log; |
|||
} |
|||
|
|||
module.exports = dev ? development : product; |
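// Launch sketch: every option above can be supplied either as a CLI flag or through its
// environment variable (process.env.X || flags.y), so the two forms below are equivalent;
// the values are placeholders and the remaining required options are elided:
//
//   node server.js -p 4000 -g postgres://user:pwd@host:5432/video_access -f http://localhost:4000 ...
//   IOT_VIDEO_ACCESS_DB=postgres://user:pwd@host:5432/video_access node server.js -p 4000 -f http://localhost:4000 ...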
@ -0,0 +1,41 @@ |
|||
{ |
|||
"name": "smart-emergency", |
|||
"version": "1.0.0", |
|||
"description": "fs smart emergency api", |
|||
"main": "server.js", |
|||
"scripts": { |
|||
"test": "set DEBUG=true&&\"node_modules/.bin/mocha\" --harmony --reporter spec app/test/*.test.js", |
|||
"start": "set NODE_ENV=development&&node server -p 4000 -g postgres://postgres:123@10.8.30.32:5432/video_access -f http://localhost:4000", |
|||
"start:linux": "export NODE_ENV=development&&node server -p 4000 -g postgres://FashionAdmin:123456@10.8.30.39:5432/pm1", |
|||
"automate": "sequelize-automate -c sequelize-automate.config.js" |
|||
}, |
|||
"author": "", |
|||
"license": "MIT", |
|||
"repository": {}, |
|||
"dependencies": { |
|||
"@alicloud/pop-core": "^1.7.12", |
|||
"@fs/attachment": "^1.0.0", |
|||
"args": "^3.0.7", |
|||
"better-xlsx": "^0.7.6", |
|||
"crypto-js": "^4.0.0", |
|||
"file-saver": "^2.0.2", |
|||
"fs-web-server-scaffold": "^2.0.2", |
|||
"ioredis": "^5.0.4", |
|||
"koa-convert": "^1.2.0", |
|||
"koa-proxy": "^0.9.0", |
|||
"moment": "^2.24.0", |
|||
"mqtt": "^4.3.7", |
|||
"node-schedule": "^2.1.0", |
|||
"nodemailer": "^6.7.7", |
|||
"path": "^0.12.7", |
|||
"path-to-regexp": "^3.0.0", |
|||
"pg": "^7.9.0", |
|||
"redis": "^3.1.2", |
|||
"request": "^2.88.2", |
|||
"superagent": "^3.5.2", |
|||
"uuid": "^3.3.2" |
|||
}, |
|||
"devDependencies": { |
|||
"mocha": "^6.0.2" |
|||
} |
|||
} |
@ -0,0 +1,35 @@ |
|||
module.exports = { |
|||
// 数据库配置 与 sequelize 相同
|
|||
dbOptions: { |
|||
database: 'video_access', |
|||
username: 'postgres', |
|||
password: '123', |
|||
dialect: 'postgres', |
|||
host: '10.8.30.32', |
|||
port: 5432, |
|||
define: { |
|||
underscored: false, |
|||
freezeTableName: false, |
|||
charset: 'utf8mb4', |
|||
timezone: '+00: 00', |
|||
dialectOptions: { |
|||
collate: 'utf8_general_ci', |
|||
}, |
|||
timestamps: false, |
|||
}, |
|||
}, |
|||
options: { |
|||
type: 'freesun', // 指定 models 代码风格
|
|||
camelCase: true, // Models 文件中代码是否使用驼峰命名
|
|||
modalNameSuffix: false, // 模型名称是否带 ‘Model’ 后缀
|
|||
fileNameCamelCase: false, // Model 文件名是否使用驼峰法命名,默认文件名会使用表名,如 `user_post.js`;如果为 true,则文件名为 `userPost.js`
|
|||
dir: './app/lib/models', // 指定输出 models 文件的目录
|
|||
typesDir: 'models', // 指定输出 TypeScript 类型定义的文件目录,只有 TypeScript / Midway 等会有类型定义
|
|||
emptyDir: false, // !!! 谨慎操作 生成 models 之前是否清空 `dir` 以及 `typesDir`
|
|||
tables: ['mirror',], // 指定生成哪些表的 models,如 ['user', 'user_post'];如果为 null,则忽略该属性
|
|||
skipTables: [], // 指定跳过哪些表的 models,如 ['user'];如果为 null,则忽略该属性
|
|||
tsNoCheck: false, // 是否添加 `@ts-nocheck` 注释到 models 文件中
|
|||
ignorePrefix: [], // 生成的模型名称忽略的前缀,因为 项目中有以下表名是以 t_ 开头的,在实际模型中不需要, 可以添加多个 [ 't_data_', 't_',] ,长度较长的 前缀放前面
|
|||
attrLength: false, // 在生成模型的字段中 是否生成 如 var(128)这种格式,公司一般使用 String ,则配置为 false
|
|||
}, |
|||
} |
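// Usage sketch: this file is consumed by the "automate" script in package.json; it
// regenerates model files for the tables listed above into ./app/lib/models:
//
//   npx sequelize-automate -c sequelize-automate.config.js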
@ -0,0 +1,12 @@ |
|||
/** |
|||
* Created by rain on 2016/1/25. |
|||
*/ |
|||
|
|||
'use strict'; |
|||
/*jslint node:true*/ |
|||
//from koa
|
|||
|
|||
const scaffold = require('fs-web-server-scaffold'); |
|||
const config = require('./config'); |
|||
|
|||
module.exports = scaffold(config); |
@ -0,0 +1,54 @@ |
|||
2022规划:物联网感知平台(物联网数据接入中台服务) |
|||
|
|||
1. 数据分析工具 |
|||
|
|||
基于notebook+python实现在线数据分析功能,提供hive/iceberg数据源。实现行业服务科常用分析方法,提供可视化界面,实现分析算法和可视化组件的动态组合。可以自定义分析流程、制定分析任务。分析结果报表文件生成和导出下载。 |
|||
|
|||
2. 原型定义扩展 |
|||
|
|||
原型组合、单位可选、公式可选。 |
|||
|
|||
增加监测原型灵活性,支持公式选择、单位选择(之前2.0的遗留功能)。 |
|||
|
|||
3. 动态数据接入和边缘网关能力 |
|||
|
|||
加强平台动态数据处理能力,主要考虑边缘计算+数据湖/OSS存储方案。 |
|||
|
|||
扩展边缘网关振动采集、DAC采集能力,实现动态数据在边缘节点的计算和存储。可实现边缘独立工作和云边协同处理能力,数据最终可汇报到平台进行存储分析。(可扩展云厂商存储能力) |
|||
|
|||
4. 存储 |
|||
|
|||
应用数据湖技术。ES存储能力协同HDFS文档型存储,提供hive/iceberg抽象层定义,存储海量异构数据。存储介质上考虑自建机房SSD热数据存储+通用机械硬盘阵列温数据备份,补充购买使用云厂商OSS服务存储冷数据,实现数据的容灾以及不同使用场景的存储需求。 |
|||
|
|||
5. ETL |
|||
|
|||
构建通用的Flink+Python 批流一体处理框架,除现有通用数据处理流程,可以给各个智慧应用提供自定义的数据处理能力,包括实时的数据处理、预告警、反向控制,以及历史数据的批处理分析、机器学习和AI训练能力。 |
|||
|
|||
6. 超融合,租户资源隔离 |
|||
|
|||
超融合是将服务器硬件资源打散融合,按需分配。实现一套简单的IaaS服务,部署我们的PaaS和SaaS平台,实现对用户资源的隔离、限制。 |
|||
|
|||
7. 继续提高平台稳定性、健壮性 |
|||
1. DAC故障跟踪解决,提升数据接入的稳定性
|||
2. 限流算法在数据接入、接口请求方面的应用 |
|||
3. 支持埋点跟踪数据日志 |
|||
4. 研发运维能力:服务进程状态/性能跟踪 |
|||
|
|||
8. 视频接入优化和性能提升 |
|||
|
|||
语言技术栈统一,支持ffmpeg通用数据流格式推流解析。支持分布式负载均衡部署。
|||
|
|||
9. 3D、BIM展示应用和GIS展示 |
|||
|
|||
持续研究以上内容在动效、性能、交互能力上的提升 |
|||
|
|||
10. 大屏展示组件化,低代码开发 |
|||
|
|||
研究低代码实现大屏的可能性,实现自定义大屏模板、组件拖拽、主题定义、数据绑定组态功能。 |
|||
|
|||
11. 其他: |
|||
|
|||
1. 工作流引擎持续定制化 |
|||
2. 协议、计算脚本化扩展能力:扩展支持python/JavaScript/Lua等通用脚本语言与Scala的互调,实现更多可自定义的处理能力。 |
|||
3. 拥抱云原生,全面容器化,使用k8s/m-k8s全套部署方案,加强k8s监控,扩展弹性伸缩能力。 |
|||
4. 提供混合云服务,提供多场景的应用部署能力。 |
@ -0,0 +1,292 @@ |
|||
## 部署启动 |
|||
|
|||
### EDGE |
|||
|
|||
**设备型号**:ok-3399C |
|||
|
|||
**系统**:ubuntu-18.04
|||
|
|||
**默认用户**:forlinx / forlinx |
|||
|
|||
**网络**: 通过netplan (apply)设置网络地址 |
|||
|
|||
**基础服务:** |
|||
|
|||
+ influxdb |
|||
|
|||
数据库。安装方法参见https://portal.influxdata.com/downloads/ |
|||
|
|||
启动数据库: influxd http://localip:8086/ (设置用户密码 admin/admin123) |
|||
|
|||
获取全局Token (后续edge配置使用) |
|||
|
|||
**启动EDGE** |
|||
|
|||
`edge.conf` |
|||
|
|||
```json |
|||
{ |
|||
"msg.mqtt.center": "10.8.30.236:1883", -- 服务端MQTT服务地址 |
|||
"serial_no": "001", -- 测试设备序列号 |
|||
"influx.token": "rBqy73hzOc1Fk5xxofGjqy5bKSmHBVLQouRBkt8eaXUmhum9c4m5nEMWVkG83ihR8CQjWbzTaLvUMoFp0xegYw==", -- influ操作token |
|||
"db.type":"file", |
|||
"db.dir":"../../resources/test", |
|||
"log.file":true, |
|||
"log.file.loc":"runtime/logs/log" |
|||
} |
|||
``` |
|||
|
|||
```shell |
|||
# 启动主程序 |
|||
chmod +x ./edge |
|||
./edge |
|||
``` |
|||
|
|||
|
|||
|
|||
|
|||
|
|||
### SERVER |
|||
|
|||
**基础服务** |
|||
|
|||
+ Emqx |
|||
|
|||
启动MQTT代理服务, emqx start |
|||
|
|||
+ Prometheus |
|||
|
|||
配置抓取设备指标 |
|||
|
|||
```yaml |
|||
scrape_configs: |
|||
- job_name: "edge-server" |
|||
static_configs: |
|||
- targets: ["localhost:19202"] |
|||
# 调试使用(抓取内网设备上的监控指标) |
|||
- job_name: "dac" |
|||
static_configs: |
|||
- targets: ["10.8.30.244:19201"] |
|||
``` |
|||
|
|||
默认UI地址: http://localhost:9090/ |
|||
|
|||
+ Grafana |
|||
|
|||
配合Prometheus显示EDGE状态和性能指标。 |
|||
|
|||
+ 其他 |
|||
|
|||
+ 连接测试Iota数据库 `postgres://postgres:postgres@10.8.30.156:5432/iota20211206?sslmode=disable` |
|||
+ 部署以太网站 http://10.8.30.38/ |
|||
+ Postman调试工具 |
|||
|
|||
|
|||
|
|||
**启动SERVER** |
|||
|
|||
配置`server.conf` |
|||
|
|||
```json |
|||
{ |
|||
"msg.mqtt.center": "10.8.30.236:1883", -- MQTT Broker地址 |
|||
"web.url":":8088", -- WEB接口地址 |
|||
"db.type": "postgres", |
|||
"db.conn": "postgres://postgres:postgres@10.8.30.156:5432/iota20211206?sslmode=disable", -- 以太数据库地址 |
|||
"log.file":true, |
|||
"log.file.loc":"runtime/logs/log" |
|||
} |
|||
``` |
|||
|
|||
启动Server. |
|||
|
|||
|
|||
|
|||
## 功能演示 |
|||
|
|||
|
|||
|
|||
### 平台新增边缘网关 |
|||
|
|||
目前已经实现CRUD API |
|||
|
|||
**新增设备:** |
|||
|
|||
URL:Post http://localhost:8088/edges |
|||
|
|||
BODY: |
|||
|
|||
```json |
|||
{"serial_no":"002","name":"DEMO-2","hardware":{"name":"FS-EDGE-01"},"software":{"ver":"0.2.1"}} |
|||
``` |
|||
|
|||
RET: 200 |
|||
|
|||
> 平台serial_no设置必须和设备端SerialNo匹配,才能进行设备控制 |
|||
|
|||
|
|||
|
|||
**查询当前所有设备**: |
|||
|
|||
URL: GET localhost:8088/edges |
|||
|
|||
RET: |
|||
|
|||
```json |
|||
{"001":{"serial_no":"001","name":"DEMO-WW","hardware":{"name":"FS-EDGE-01"},"software":{"ver":"0.2.1"},"set_ver":"1","config_ver":"9"},"002":{"serial_no":"002","name":"DEMO-2","properties":{"hb":"true"},"hardware":{"name":"FS-EDGE-01"},"software":{"ver":"0.2.1"},"set_ver":"0","config_ver":"0"}} |
|||
``` |
|||
|
|||
|
|||
|
|||
其他: **修改PUT** 和 **删除 DELETE** |
|||
|
|||
|
|||
|
|||
### 网关在线状态和性能在线统计 |
|||
|
|||
通过网关心跳数据上报,Prometheus抓取,可通过Grafana查看: |
|||
|
|||
![image-20220121162513190](imgs/EDGE-V0.1调试手册/image-20220121162513190.png) |
|||
|
|||
其中心跳数据格式如下: |
|||
|
|||
```json |
|||
{ |
|||
"time": 1642734937400741643, -- 当前数据的设备时间(用于校时) |
|||
"ver": { |
|||
"pv": "v0.0.1" -- 当前配置版本(包括设备配置和采集配置) |
|||
}, |
|||
"machine": { |
|||
"mt": 3845, -- 总内存 |
|||
"mf": 2616, -- 空闲内存 |
|||
"mp": 10.074738688877986, -- 内存使用比 |
|||
"dt": 12031, -- 总磁盘 |
|||
"df": 7320, -- 剩余磁盘空间 |
|||
"dp": 36, -- 磁盘使用率 |
|||
"u": 7547, -- 系统启动时长 |
|||
"pform": "ubuntu", -- 系统信息 |
|||
"pver": "18.04", -- 系统版本 |
|||
"load1": 0.09, -- 1分钟内平均负载 |
|||
"load5": 0.02, -- 5分钟内平均负载 |
|||
"load15": 0.01 -- 15分钟内平均负载 |
|||
} |
|||
} |
|||
``` |
|||
|
|||
|
|||
|
|||
### 绑定结构物到网关 |
|||
|
|||
在以太(测试环境)建立结构物,我们这里模拟的一个振弦采集的场景,如下 |
|||
|
|||
![image-20220121135940527](imgs/EDGE-V0.1调试手册/image-20220121135940527.png) |
|||
|
|||
下发该结构物到边缘网关 |
|||
|
|||
URL:Post http://localhost:8088/edge/002/things
|||
|
|||
BODY: |
|||
|
|||
```json |
|||
["f73d1b17-f2d5-46dd-9dd1-ebbb66b11854"] |
|||
``` |
|||
|
|||
RET: 200 |
|||
|
|||
> 获取指定网关绑定的结构物 GET http://localhost:8088/edge/002/things
|||
|
|||
|
|||
|
|||
下发后,边缘网关自动更新配置(如果未在线,会在下次上线后更新配置),并重启
|||
|
|||
![image-20220121152314499](imgs/EDGE-V0.1调试手册/image-20220121152314499.png) |
|||
|
|||
|
|||
|
|||
模拟DTU设备上线到边缘网关, |
|||
|
|||
<img src="imgs/EDGE-V0.1调试手册/image-20220121152705457.png" width=600 align=left/> |
|||
|
|||
|
|||
|
|||
随后边缘网关按照配置的采集规则进行采集,目前可以通过边缘端InfluxDB的Web UI查看数据: |
|||
|
|||
![image-20220121163903101](imgs/EDGE-V0.1调试手册/image-20220121163903101.png) |
|||
|
|||
采集的数据会通过MQTT消息发送到服务端,见下节(采集数据实时预览)。 |
|||
|
|||
同时,在平台更改采集配置(部署)后,通过 POST http://localhost:8088/edge/002/sync 可以触发网关进行配置同步。
|||
|
|||
|
|||
|
|||
### 采集数据实时预览 |
|||
|
|||
DAC采集的数据会实时推送到服务器MQTT上,服务端进行**入库**操作,并支持WebSocket向前端接口**推送**。
|||
|
|||
ws地址:ws://localhost:8088/edge/ws/{device} |
|||
|
|||
实时数据预览界面:http://localhost:8088/edge/rt/{device} |
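下面是该 WebSocket 地址的一个最小客户端示例(示意性质:假设使用 `ws` npm 包、设备序列号为 `002`,推送数据的具体字段此处未作约定):

```js
// minimal subscriber for the real-time preview endpoint (assumptions: `ws` package, device "002")
const WebSocket = require('ws');
const sock = new WebSocket('ws://localhost:8088/edge/ws/002');
sock.on('message', raw => {
  console.log('realtime data:', raw.toString());
});
```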
|||
|
|||
![image-20220121162951692](imgs/EDGE-V0.1调试手册/image-20220121162951692.png) |
|||
|
|||
|
|||
|
|||
### 绑定包含振动设备的结构物 |
|||
|
|||
新建包含振动设备的结构物,测试如下: |
|||
|
|||
![image-20220121163144291](imgs/EDGE-V0.1调试手册/image-20220121163144291.png) |
|||
|
|||
同上,执行结构物绑定网关操作。 |
|||
|
|||
|
|||
|
|||
模拟振动设备连接到网关,通过日志可以看到网关开始采集振动传感器: |
|||
|
|||
![image-20220121164158554](imgs/EDGE-V0.1调试手册/image-20220121164158554.png) |
|||
|
|||
振动数据存储在本地,通过数据库的定时聚集功能(CQ),生成分钟级聚集数据。查看实时数据如下: |
|||
|
|||
![image-20220121164306992](imgs/EDGE-V0.1调试手册/image-20220121164306992.png) |
|||
|
|||
|
|||
|
|||
### 动态数据实时预览 |
|||
|
|||
振动的实时数据**默认不会**直接推送到平台。 |
|||
|
|||
前端打开振动设备实时数据界面,将发布WS订阅,此时会通知设备开始上报数据(类似视频推流服务的实现),之后类似普通数据的处理方式。 |
|||
|
|||
实时数据刷新界面如下: |
|||
|
|||
![image-20220121164715214](imgs/EDGE-V0.1调试手册/image-20220121164715214.png) |
|||
|
|||
WS订阅退出后,会通知设备关闭实时推流(节约流量、性能和服务端存储)。 |
|||
|
|||
后面会实现云端保存最近一段播放历史、设备上的历史数据回放功能。 |
|||
|
|||
|
|||
|
|||
### 作单机振动采集软件使用 |
|||
|
|||
包含振动采集的配置、采集、计算、存储、转发功能。可以替换某些场景下本地工控机上的DAAS软件。 |
|||
|
|||
> 注:云端工作模式,访问设备上的Vib界面,可以查看配置,但是不能进行修改。 |
|||
|
|||
|
|||
|
|||
振动设备配置:http://10.8.30.244:8828/vib |
|||
|
|||
![image-20220121165041737](imgs/EDGE-V0.1调试手册/image-20220121165041737.png) |
|||
|
|||
振动通道配置: |
|||
|
|||
![image-20220121165146403](imgs/EDGE-V0.1调试手册/image-20220121165146403.png) |
|||
|
|||
IP设置: |
|||
|
|||
![image-20220121165230596](imgs/EDGE-V0.1调试手册/image-20220121165230596.png) |
|||
|
|||
网关侧实时数据预览: |
|||
|
|||
![image-20220121165302506](imgs/EDGE-V0.1调试手册/image-20220121165302506.png) |
@ -0,0 +1 @@ |
|||
1. 历史数据查询 |
@ -0,0 +1,286 @@ |
|||
## 部署启动 |
|||
|
|||
### EDGE |
|||
|
|||
**设备型号**:ok-3399C |
|||
|
|||
**系统**:ubuntu-18.04
|||
|
|||
**默认用户**:forlinx / forlinx |
|||
|
|||
**网络**: 通过netplan (apply)设置网络地址 |
|||
|
|||
**安装程序:** |
|||
|
|||
```sh |
|||
#通过串口线连接Console口,或者设置好网络后通过IP地址,远程SSH到板子上 |
|||
# 安装目前只支持在线模式,设备必须接入因特网 |
|||
# 1. 安装docker |
|||
$ sudo apt-get update |
|||
$ sudo apt-get upgrade |
|||
$ curl -fsSL test.docker.com -o get-docker.sh && sh get-docker.sh |
|||
$ sudo usermod -aG docker $USER |
|||
$ sudo apt install gnupg2 pass |
|||
|
|||
# 2. 安装程序 |
|||
# 复制disk包到网关上 |
|||
$ chmod +x docker-compose
|||
$ docker-compose up -d |
|||
``` |
|||
|
|||
|
|||
|
|||
安装完成之后,在浏览器中访问 http://ip:8828 ,进入如下界面,表示设备初始化成功 |
|||
|
|||
|
|||
|
|||
![image-20220322090946149](imgs/EDGE-V0.2调试手册/image-20220322090946149.png) |
|||
|
|||
|
|||
|
|||
|
|||
|
|||
### SERVER |
|||
|
|||
**基础服务** |
|||
|
|||
+ Emqx |
|||
|
|||
启动MQTT代理服务, emqx start |
|||
|
|||
+ Prometheus |
|||
|
|||
配置抓取设备指标 |
|||
|
|||
```yaml |
|||
scrape_configs: |
|||
- job_name: "edge-server" |
|||
static_configs: |
|||
- targets: ["localhost:19202"] |
|||
# 调试使用(抓取内网设备上的监控指标) |
|||
- job_name: "dac" |
|||
static_configs: |
|||
- targets: ["10.8.30.244:19201"] |
|||
``` |
|||
|
|||
默认UI地址: http://localhost:9090/ |
|||
|
|||
+ Grafana |
|||
|
|||
配合Prometheus显示EDGE状态和性能指标。 |
|||
|
|||
+ 其他 |
|||
|
|||
+ 连接测试Iota数据库 `postgres://postgres:postgres@10.8.30.156:5432/iota20211206?sslmode=disable` |
|||
+ 部署以太网站 http://10.8.30.38/ |
|||
+ Postman调试工具 |
|||
|
|||
|
|||
|
|||
**启动SERVER** |
|||
|
|||
配置`server.conf` |
|||
|
|||
```json |
|||
{ |
|||
"msg.mqtt.center": "10.8.30.236:1883", -- MQTT Broker地址 |
|||
"web.url":":8088", -- WEB接口地址 |
|||
"db.type": "postgres", |
|||
"db.conn": "postgres://postgres:postgres@10.8.30.156:5432/iota20211206?sslmode=disable", -- 以太数据库地址 |
|||
"log.file":true, |
|||
"log.file.loc":"runtime/logs/log" |
|||
} |
|||
``` |
|||
|
|||
启动Server. |
|||
|
|||
|
|||
|
|||
## 功能演示 |
|||
|
|||
|
|||
|
|||
### 平台新增边缘网关 |
|||
|
|||
目前已经实现CRUD API |
|||
|
|||
**新增设备:** |
|||
|
|||
URL:Post http://localhost:8088/edges |
|||
|
|||
BODY: |
|||
|
|||
```json |
|||
{"serial_no":"002","name":"DEMO-2","hardware":{"name":"FS-EDGE-01"},"software":{"ver":"0.2.1"}} |
|||
``` |
|||
|
|||
RET: 200 |
|||
|
|||
> 平台serial_no设置必须和设备端SerialNo匹配,才能进行设备控制 |
|||
|
|||
|
|||
|
|||
**查询当前所有设备**: |
|||
|
|||
URL: GET localhost:8088/edges |
|||
|
|||
RET: |
|||
|
|||
```json |
|||
{"001":{"serial_no":"001","name":"DEMO-WW","hardware":{"name":"FS-EDGE-01"},"software":{"ver":"0.2.1"},"set_ver":"1","config_ver":"9"},"002":{"serial_no":"002","name":"DEMO-2","properties":{"hb":"true"},"hardware":{"name":"FS-EDGE-01"},"software":{"ver":"0.2.1"},"set_ver":"0","config_ver":"0"}} |
|||
``` |
|||
|
|||
|
|||
|
|||
其他: **修改PUT** 和 **删除 DELETE** |
|||
|
|||
|
|||
|
|||
### 网关在线状态和性能在线统计 |
|||
|
|||
通过网关心跳数据上报,Prometheus抓取,可通过Grafana查看: |
|||
|
|||
![image-20220121162513190](imgs/EDGE-V0.1调试手册/image-20220121162513190.png) |
|||
|
|||
其中心跳数据格式如下: |
|||
|
|||
```json |
|||
{ |
|||
"time": 1642734937400741643, -- 当前数据的设备时间(用于校时) |
|||
"ver": { |
|||
"pv": "v0.0.1" -- 当前配置版本(包括设备配置和采集配置) |
|||
}, |
|||
"machine": { |
|||
"mt": 3845, -- 总内存 |
|||
"mf": 2616, -- 空闲内存 |
|||
"mp": 10.074738688877986, -- 内存使用比 |
|||
"dt": 12031, -- 总磁盘 |
|||
"df": 7320, -- 剩余磁盘空间 |
|||
"dp": 36, -- 磁盘使用率 |
|||
"u": 7547, -- 系统启动时长 |
|||
"pform": "ubuntu", -- 系统信息 |
|||
"pver": "18.04", -- 系统版本 |
|||
"load1": 0.09, -- 1分钟内平均负载 |
|||
"load5": 0.02, -- 5分钟内平均负载 |
|||
"load15": 0.01 -- 15分钟内平均负载 |
|||
} |
|||
} |
|||
``` |
|||
|
|||
|
|||
|
|||
### 绑定结构物到网关 |
|||
|
|||
在以太(测试环境)建立结构物,我们这里模拟的一个振弦采集的场景,如下 |
|||
|
|||
![image-20220121135940527](imgs/EDGE-V0.1调试手册/image-20220121135940527.png) |
|||
|
|||
下发该结构物到边缘网关 |
|||
|
|||
URL:Post http://localhost:8088/edge/002/things
|||
|
|||
BODY: |
|||
|
|||
```json |
|||
["f73d1b17-f2d5-46dd-9dd1-ebbb66b11854"] |
|||
``` |
|||
|
|||
RET: 200 |
|||
|
|||
> 获取指定网关绑定的结构物 GET http://localhost:8088/edge/002/things
|||
|
|||
|
|||
|
|||
下发后,边缘网关自动更新配置(如果未在线,会在下次上线后更新配置),并重启
|||
|
|||
![image-20220121152314499](imgs/EDGE-V0.1调试手册/image-20220121152314499.png) |
|||
|
|||
|
|||
|
|||
模拟DTU设备上线到边缘网关, |
|||
|
|||
<img src="imgs/EDGE-V0.1调试手册/image-20220121152705457.png" width=600 align=left/> |
|||
|
|||
|
|||
|
|||
随后边缘网关按照配置的采集规则进行采集,目前可以通过边缘端InfluxDB的Web UI查看数据: |
|||
|
|||
![image-20220121163903101](imgs/EDGE-V0.1调试手册/image-20220121163903101.png) |
|||
|
|||
采集的数据会通过MQTT消息发送到服务端,见下节(采集数据实时预览)。 |
|||
|
|||
同时,在平台更改采集配置(部署)后,通过 POST http://localhost:8088/edge/002/sync 可以触发网关进行配置同步。
|||
|
|||
|
|||
|
|||
### 采集数据实时预览 |
|||
|
|||
DAC采集的数据会实时推送到服务器MQTT上,服务端进行**入库**操作,并支持WebSocket向前端接口**推送**。
|||
|
|||
ws地址:ws://localhost:8088/edge/ws/{device} |
|||
|
|||
实时数据预览界面:http://localhost:8088/edge/rt/{device} |
|||
|
|||
![image-20220121162951692](imgs/EDGE-V0.1调试手册/image-20220121162951692.png) |
|||
|
|||
|
|||
|
|||
### 绑定包含振动设备的结构物 |
|||
|
|||
新建包含振动设备的结构物,测试如下: |
|||
|
|||
![image-20220121163144291](imgs/EDGE-V0.1调试手册/image-20220121163144291.png) |
|||
|
|||
同上,执行结构物绑定网关操作。 |
|||
|
|||
|
|||
|
|||
模拟振动设备连接到网关,通过日志可以看到网关开始采集振动传感器: |
|||
|
|||
![image-20220121164158554](imgs/EDGE-V0.1调试手册/image-20220121164158554.png) |
|||
|
|||
振动数据存储在本地,通过数据库的定时聚集功能(CQ),生成分钟级聚集数据。查看实时数据如下: |
|||
|
|||
![image-20220121164306992](imgs/EDGE-V0.1调试手册/image-20220121164306992.png) |
|||
|
|||
|
|||
|
|||
### 动态数据实时预览 |
|||
|
|||
振动的实时数据**默认不会**直接推送到平台。 |
|||
|
|||
前端打开振动设备实时数据界面,将发布WS订阅,此时会通知设备开始上报数据(类似视频推流服务的实现),之后类似普通数据的处理方式。 |
|||
|
|||
实时数据刷新界面如下: |
|||
|
|||
![image-20220121164715214](imgs/EDGE-V0.1调试手册/image-20220121164715214.png) |
|||
|
|||
WS订阅退出后,会通知设备关闭实时推流(节约流量、性能和服务端存储)。 |
|||
|
|||
后面会实现云端保存最近一段播放历史、设备上的历史数据回放功能。 |
|||
|
|||
|
|||
|
|||
### 作单机振动采集软件使用 |
|||
|
|||
包含振动采集的配置、采集、计算、存储、转发功能。可以替换某些场景下本地工控机上的DAAS软件。 |
|||
|
|||
> 注:云端工作模式,访问设备上的Vib界面,可以查看配置,但是不能进行修改。 |
|||
|
|||
|
|||
|
|||
振动设备配置:http://10.8.30.244:8828/vib |
|||
|
|||
![image-20220121165041737](imgs/EDGE-V0.1调试手册/image-20220121165041737.png) |
|||
|
|||
振动通道配置: |
|||
|
|||
![image-20220121165146403](imgs/EDGE-V0.1调试手册/image-20220121165146403.png) |
|||
|
|||
IP设置: |
|||
|
|||
![image-20220121165230596](imgs/EDGE-V0.1调试手册/image-20220121165230596.png) |
|||
|
|||
网关侧实时数据预览: |
|||
|
|||
![image-20220121165302506](imgs/EDGE-V0.1调试手册/image-20220121165302506.png) |
@ -0,0 +1,69 @@ |
|||
找一根USB转接线连接 板子的Console口,如下: |
|||
|
|||
|
|||
|
|||
![image-20220407085859032](imgs/EDGE-环境准备/image-20220407085859032.png) |
|||
|
|||
|
|||
|
|||
电脑会自动安装驱动,等待自动安装完成,在设备管理界面中,可查看具体的串口号: |
|||
|
|||
![image-20220407090121447](imgs/EDGE-环境准备/image-20220407090121447.png) |
|||
|
|||
|
|||
|
|||
通过putty或xshell等工具可以对该串口进行连接:
|||
|
|||
![image-20220407090243473](imgs/EDGE-环境准备/image-20220407090243473.png) |
|||
|
|||
|
|||
|
|||
![image-20220407090353559](imgs/EDGE-环境准备/image-20220407090353559.png) |
|||
|
|||
> The default username and password are both forlinx; `sudo su` switches to the root account, whose password is also `forlinx`.
|||
|
|||
|
|||
|
|||
Configure the network:

Find an Ethernet cable and connect the board to the office router, then edit the netplan configuration:
|||
|
|||
```sh
root@forlinx:/etc/netplan# cd /etc/netplan/
root@forlinx:/etc/netplan# ls
50-cloud-init.yaml
root@forlinx:/etc/netplan# vi 50-cloud-init.yaml
network:
    ethernets:
        eth0:
            dhcp4: no
            addresses: [10.8.30.244/24]
            gateway4: 10.8.30.1
            nameservers:
                addresses: [114.114.114.114]
                search: [localdomain]
    version: 2
~
root@forlinx:/etc/netplan# netplan apply
root@forlinx:/etc/netplan# ip a
```
|||
|
|||
![image-20220407090848867](imgs/EDGE-环境准备/image-20220407090848867.png) |
|||
|
|||
My configuration here is:

```yaml
network:
    ethernets:
        eth0:
            dhcp4: no
            addresses: [10.8.30.244/24]      # address and netmask
            gateway4: 10.8.30.1              # gateway
            nameservers:
                addresses: [114.114.114.114] # DNS
                search: [localdomain]
    version: 2
```
|||
|
|||
Once the network is configured, the follow-up commands can be run; see 《EDGE-V-N调试手册.pdf》 for details.
|||
## UCloud Cloud Host

https://console.ucloud.cn/

Account password: FS12345678
|||
|
|||
|
|||
|
|||
## Environment Setup
|||
|
|||
**Postgres** |
|||
|
|||
```sh |
|||
apt update |
|||
apt install postgresql postgresql-contrib |
|||
|
|||
su postgres |
|||
> psql |
|||
> # alter user postgres with password 'ROOT'; |
|||
|
|||
vi /etc/postgresql/9.5/main/pg_hba.conf |
|||
# host all all 10.60.178.0/24 md5 |
|||
service postgresql restart |
|||
|
|||
createdb iOTA_console |
|||
psql -d iOTA_console < dump.sql |
|||
``` |
|||
|
|||
|
|||
|
|||
**Docker** |
|||
|
|||
```sh |
|||
curl -sSL https://get.daocloud.io/docker | sh |
|||
``` |
|||
|
|||
|
|||
|
|||
**Redis** |
|||
|
|||
Because exposing Redis's default port to the public internet is unsafe, enable the Ubuntu firewall (ufw):
|||
|
|||
```sh
ufw enable

ufw status

# allow external access to this host by default
ufw default allow

# deny external access to port 6379
ufw deny 6379

# a few other examples
# allow 10.0.1.0/10 to reach port 7277 on 10.8.30.117
ufw allow proto tcp from 10.0.1.0/10 to 10.8.30.117 port 7277

Status: active

To                         Action      From
--                         ------      ----
6379                       DENY        Anywhere
6379 (v6)                  DENY        Anywhere (v6)
```
|||
|
|||
Even with the host firewall opened, the ports were still unreachable from the public network. Go into the UCloud console:

Basic Network (UNet) > External Firewall > Create Firewall (custom rules)

Open all TCP ports and block only Redis port 6379.
|||
|
|||
![image-20211122152046659](imgs/UCloud-DAC上云测试/image-20211122152046659.png) |
|||
|
|||
Cloud Host (UHost) > Related Resource Actions > Change External Firewall
|||
|
|||
![image-20211122152136855](imgs/UCloud-DAC上云测试/image-20211122152136855.png) |
|||
|
|||
|
|||
|
|||
Install Redis:
|||
|
|||
```sh |
|||
apt update |
|||
apt install redis-server |
|||
``` |
|||
|
|||
|
|||
|
|||
|
|||
|
|||
|
|||
|
|||
## Traffic Mirroring Test

With the machine-room relocation coming up, we plan to run a single-instance DAC in the cloud for data acquisition.

Preparation: run an online traffic-mirroring test without disturbing acquisition by the production DAC. The plan is as follows:
|||
|
|||
1. Passive connections: on the proxy, forward them on to UCloud.
   1. Copy the stream one way only: the device -> proxy -> DAC path is kept, while the return path DAC -> proxy -|-> device is left open-circuit.
2. Active connections
   1. For MQTT/HTTP connections the DAC initiates to third-party servers,
   2. append a suffix to the MQTT clientid.
3. Cut off the driver's writes.
|||
|
|||
Key code:
|||
|
|||
```go
// io.Copy cannot be run more than once over the same connection

// If OutTarget is configured, copy locally while also tee-ing the stream out
func Pipeout(conn1, conn2 net.Conn, port string, wg *sync.WaitGroup, reg []byte) {
	if OutTarget != "" {
		tt := fmt.Sprintf("%s:%s", OutTarget, port)
		tw := NewTeeWriter(tt, reg)
		tw.Start()
		if _, err := io.Copy(tw, io.TeeReader(conn2 /*read*/, conn1 /*write*/)); err != nil {
			log.Error("pipeout error: %v", err)
		}
		tw.Close()
	} else {
		io.Copy(conn1, conn2)
	}
	conn1.Close()
	log.Info("[tcp] close the connect at local:%s and remote:%s", conn1.LocalAddr().String(), conn1.RemoteAddr().String())
	wg.Done()
}

// TeeWriter forwards everything written to it to the traffic-mirroring target
type TeeWriter struct {
	target    string           // forwarding target address
	conn      net.Conn         // forwarding connection
	isConnect bool             // whether the forwarding connection is up
	exitCh    chan interface{} // exit signal
	registry  []byte           // registration frame replayed on each (re)connect
}

func NewTeeWriter(target string, reg []byte) *TeeWriter {
	return &TeeWriter{
		target:   target,
		exitCh:   make(chan interface{}),
		registry: reg,
	}
}

func (w *TeeWriter) Start() error {
	go w.keep_connect()
	return nil
}

func (w *TeeWriter) Close() error {
	close(w.exitCh)
	return nil
}

func (w *TeeWriter) Write(p []byte) (n int, err error) {
	defer func() {
		if err := recover(); err != nil {
			log.Error("teewrite failed %s", w.target)
		}
	}()
	if w.isConnect {
		go w.conn.Write(p)
	}
	// this method never returns an error
	return len(p), nil
}

func (w *TeeWriter) keep_connect() {
	defer func() {
		if err := recover(); err != nil {
			log.Error("teewrite keep connect error: %v", err)
		}
	}()
	for {
		if cont := func() bool {
			var err error
			w.conn, err = net.Dial("tcp", w.target)
			if err != nil {
				select {
				case <-time.After(time.Second):
					return true
				case <-w.exitCh:
					return false
				}
			}
			w.isConnect = true
			defer func() {
				w.isConnect = false
			}()
			defer w.conn.Close()

			if w.registry != nil {
				_, err := w.conn.Write(w.registry)
				if err != nil {
					return true
				}
			}

			if err := w.conn.(*net.TCPConn).SetKeepAlive(true); err != nil {
				return true
			}
			if err := w.conn.(*net.TCPConn).SetKeepAlivePeriod(30 * time.Second); err != nil {
				return true
			}

			connLostCh := make(chan interface{})
			defer close(connLostCh)

			// watch the remote (bconn) connection for disconnects
			go func() {
				defer func() {
					log.Info("bconn check exit")
					recover() // write to closed channel
				}()
				one := make([]byte, 1)
				for {
					if _, err := w.conn.Read(one); err != nil {
						log.Info("bconn disconnected")
						connLostCh <- err
						return
					}
					time.Sleep(time.Second)
				}
			}()

			select {
			case <-connLostCh:
				time.Sleep(10 * time.Second)
				return true
			case <-w.exitCh:
				return false
			}
		}(); !cont {
			break
		} else {
			time.Sleep(time.Second)
		}
	}
}
```
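As a usage illustration only (this wiring does not appear in the document; the listener/DAC addresses and the nil registration frame are placeholders), the proxy side might call `Pipeout` roughly like this, assuming it lives in the same package with the same imports:

```go
// Hypothetical wiring sketch: accept a device connection, dial the local DAC,
// and let Pipeout copy device -> DAC while tee-ing the same bytes to the
// OutTarget (UCloud) instance. The return path stays a plain local copy, so
// nothing is mirrored back to the device from the cloud side.
func serve(listenAddr, dacAddr, port string) error {
	ln, err := net.Listen("tcp", listenAddr)
	if err != nil {
		return err
	}
	for {
		dev, err := ln.Accept()
		if err != nil {
			return err
		}
		go func(device net.Conn) {
			dac, err := net.Dial("tcp", dacAddr)
			if err != nil {
				device.Close()
				return
			}
			var wg sync.WaitGroup
			wg.Add(1)
			// device -> DAC (plus tee to OutTarget when configured)
			go Pipeout(dac, device, port, &wg, nil)
			// DAC -> device: local copy only
			go io.Copy(device, dac)
			wg.Wait()
		}(dev)
	}
}
```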
|||
|
|||
|
|||
|
|||
The traffic-mirroring test was never actually carried out...
|||
|
|||
|
|||
|
|||
## DAC Online Test
|||
|
|||
The configuration is as follows:
|||
|
|||
```json |
|||
|
|||
``` |
|||
|
|||
`url.maps.json` needs to be configured:
|||
|
|||
```json |
|||
"47.106.112.113:1883" |
|||
"47.104.249.223:1883" |
|||
"mqtt.starwsn.com:1883" |
|||
"test.tdzntech.com:1883" |
|||
"mqtt.tdzntech.com:1883" |
|||
|
|||
"s1.cn.mqtt.theiota.cn:8883" |
|||
"mqtt.datahub.anxinyun.cn:1883" |
|||
|
|||
"218.3.126.49:3883" |
|||
"221.230.55.28:1883" |
|||
|
|||
"anxin-m1:1883" |
|||
"10.8.25.201:8883" |
|||
"10.8.25.231:1883" |
|||
"iota-m1:1883" |
|||
``` |
|||
|
|||
|
|||
|
|||
The following data cannot be retrieved:

1. GNSS data
|||
|
|||
http.get error: Get "http://10.8.25.254:7005/gnss/6542/data?startTime=1575443410000&endTime=1637628026000": dial tcp 10.8.25.254:7005: i/o timeout |
|||
|
|||
2. 时 |
|||
|
|||
|
|||
|
|||
## Troubleshooting DAC Memory Usage

> These notes are a bit rough; a cleaner walkthrough is at https://www.cnblogs.com/gao88/p/9849819.html
>
> On using pprof:
|||
> |
|||
> https://segmentfault.com/a/1190000020964967 |
|||
> |
|||
> https://cizixs.com/2017/09/11/profiling-golang-program/ |
|||
|
|||
Check the process's memory consumption:
|||
|
|||
```sh |
|||
top -c |
|||
# shift+M |
|||
top - 09:26:25 up 1308 days, 15:32, 2 users, load average: 3.14, 3.70, 4.37 |
|||
Tasks: 582 total, 1 running, 581 sleeping, 0 stopped, 0 zombie |
|||
%Cpu(s): 5.7 us, 1.5 sy, 0.0 ni, 92.1 id, 0.0 wa, 0.0 hi, 0.8 si, 0.0 st |
|||
KiB Mem : 41147560 total, 319216 free, 34545608 used, 6282736 buff/cache |
|||
KiB Swap: 0 total, 0 free, 0 used. 9398588 avail Mem |
|||
|
|||
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND |
|||
18884 root 20 0 11.238g 0.010t 11720 S 48.8 26.7 39:52.43 ./dac |
|||
``` |
|||
|
|||
The DAC turns out to be using more than 10 GB of memory.
|||
|
|||
|
|||
|
|||
Find the container it runs in:
|||
|
|||
```sh |
|||
root@iota-n3:/home/iota/etwatcher# systemd-cgls | grep 18884 |
|||
│ │ ├─32574 grep --color=auto 18884 |
|||
│ │ └─18884 ./dac |
|||
``` |
|||
|
|||
|
|||
|
|||
```sh |
|||
for i in $(docker container ls --format "{{.ID}}"); do docker inspect -f '{{.State.Pid}} {{.Name}}' $i; done | grep 18884 |
|||
``` |
|||
|
|||
This locates it to dac-2.
|||
|
|||
|
|||
|
|||
> To look up the PIDs of a given container you can use:
|||
> |
|||
> docker top container_id |
|||
> |
|||
> To get the PIDs of all containers:
|||
> |
|||
> ```sh |
|||
> for l in `docker ps -q`;do docker top $l|awk -v dn="$l" 'NR>1 {print dn " PID is " $2}';done |
|||
> ``` |
|||
> |
|||
> Or via docker inspect:
|||
> |
|||
> ```sh |
|||
> docker inspect --format "{{.State.Pid}}" container_id/name |
|||
> ``` |
|||
|
|||
Inspect the dac-2 container:
|||
|
|||
```sh |
|||
root@iota-n3:~# docker ps | grep dac-2 |
|||
05b04c4667bc repository.anxinyun.cn/iota/dac "./dac" 2 hours ago Up 2 hours k8s_iota-dac_iota-dac-2_iota_d9879026-465b-11ec-ad00-c81f66cfe365_1 |
|||
be5682a82cda theiota.store/iota/filebeat "filebeat -e" 4 hours ago Up 4 hours k8s_iota-filebeat_iota-dac-2_iota_d9879026-465b-11ec-ad00-c81f66cfe365_0 |
|||
f23499bc5c22 gcr.io/google_containers/pause-amd64:3.0 "/pause" 4 hours ago Up 4 hours k8s_POD_iota-dac-2_iota_d9879026-465b-11ec-ad00-c81f66cfe365_0 |
|||
c5bcbf648268 repository.anxinyun.cn/iota/dac "./dac" 6 days ago Up 6 days k8s_iota-dac_iota-dac-2_iota_2364cf27-41a0-11ec-ad00-c81f66cfe365_0 |
|||
``` |
|||
|
|||
> There are two? (Ignore the other one, a zombie process, for now.)
|||
|
|||
|
|||
|
|||
Enter the container:
|||
|
|||
```sh |
|||
docker exec -it 05b04c4667bc /bin/ash |
|||
``` |
|||
|
|||
|
|||
|
|||
> No curl command inside the container?
>
> Use wget -q -O - https://www.baidu.com to print the response body directly.
|||
|
|||
|
|||
|
|||
On the host machine:
|||
|
|||
```sh |
|||
go tool pprof -inuse_space http://10.244.1.235:6060/debug/pprof/heap |
|||
|
|||
# "top" lists the 10 entries with the highest in-use memory
|||
(pprof) top |
|||
Showing nodes accounting for 913.11MB, 85.77% of 1064.60MB total |
|||
Dropped 215 nodes (cum <= 5.32MB) |
|||
Showing top 10 nodes out of 109 |
|||
flat flat% sum% cum cum% |
|||
534.20MB 50.18% 50.18% 534.20MB 50.18% runtime.malg |
|||
95.68MB 8.99% 59.17% 95.68MB 8.99% iota/vendor/github.com/yuin/gopher-lua.newLTable |
|||
61.91MB 5.82% 64.98% 90.47MB 8.50% iota/vendor/github.com/yuin/gopher-lua.newFuncContext |
|||
50.23MB 4.72% 69.70% 50.23MB 4.72% iota/vendor/github.com/yuin/gopher-lua.newRegistry |
|||
34.52MB 3.24% 72.94% 34.52MB 3.24% iota/vendor/github.com/yuin/gopher-lua.(*LTable).RawSetString |
|||
33MB 3.10% 76.04% 33MB 3.10% iota/vendor/github.com/eclipse/paho%2emqtt%2egolang.outgoing |
|||
31MB 2.91% 78.95% 31MB 2.91% iota/vendor/github.com/eclipse/paho%2emqtt%2egolang.errorWatch |
|||
31MB 2.91% 81.87% 31MB 2.91% iota/vendor/github.com/eclipse/paho%2emqtt%2egolang.keepalive |
|||
27.06MB 2.54% 84.41% 27.06MB 2.54% iota/vendor/github.com/yuin/gopher-lua.newFunctionProto (inline) |
|||
14.50MB 1.36% 85.77% 14.50MB 1.36% iota/vendor/github.com/eclipse/paho%2emqtt%2egolang.alllogic |
|||
``` |
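Profiling over HTTP like this only works because the DAC binary already serves /debug/pprof on port 6060. For reference, the standard way to expose that in a Go program looks like the sketch below; this is a generic example, not the DAC's actual code.

```go
package main

import (
	"log"
	"net/http"
	_ "net/http/pprof" // side-effect import registers the /debug/pprof/* handlers
)

func main() {
	// Serve the profiling endpoints that
	// `go tool pprof http://<pod-ip>:6060/debug/pprof/heap` reads.
	go func() {
		log.Println(http.ListenAndServe(":6060", nil))
	}()

	select {} // stand-in for the program's real work
}
```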
|||
|
|||
|
|||
|
|||
> `top` lists the parts with the largest consumption
>
> `list` shows function source alongside the sampled data
>
> `disasm` shows assembly alongside the sampled data
>
> `web` generates an SVG call graph
|||
|
|||
|
|||
|
|||
Run go tool pprof on the server to generate a profile file, copy it to the local Windows machine, and run:
|||
|
|||
![image-20211116103902511](imgs/UCloud-DAC上云测试/image-20211116103902511.png) |
|||
|
|||
|
|||
|
|||
> Install Graphviz
|||
> |
|||
> https://graphviz.gitlab.io/_pages/Download/Download_windows.html |
|||
> |
|||
> Download the zip, extract it, and add the bin directory to the system PATH.
|||
> |
|||
> ```sh |
|||
> C:\Users\yww08>dot -version |
|||
> dot - graphviz version 2.45.20200701.0038 (20200701.0038) |
|||
> There is no layout engine support for "dot" |
|||
> Perhaps "dot -c" needs to be run (with installer's privileges) to register the plugins? |
|||
> ``` |
|||
|
|||
> ```sh
> # initialize the dot plugins
> dot -c
> ```
|||
|
|||
|
|||
|
|||
Run pprof locally:
|||
|
|||
```sh |
|||
go tool pprof --http=:8080 pprof.dac.alloc_objects.alloc_space.inuse_objects.inuse_space.003.pb.gz |
|||
``` |
|||
|
|||
!["sss"](imgs/UCloud-DAC上云测试/image-20211116112452820.png) |
|||
|
|||
Memory usage is concentrated in:

runtime.malg

After digging through a lot of material, it turns out the Go project has had an open issue about this for a long time (an official issue); the maintainers are aware of it, it is just hard to fix. It is described as follows:

Your observation is correct. Currently the runtime never frees the g objects created for goroutines, though it does reuse them. The main reason for this is that the scheduler often manipulates g pointers without write barriers (a lot of scheduler code runs without a P, and hence cannot have write barriers), and this makes it very hard to determine when a g can be garbage collected.

Roughly: Go's garbage collector is concurrent, and the scheduler manipulates goroutine (g) pointers without write barriers (see draveness's analysis); much scheduler code runs without a P (in the GPM model) and therefore cannot use write barriers, which makes it very hard for the runtime to decide when a g can be collected, so the g objects held by the scheduler are effectively never freed.

(Source: CSDN post by wuyuhao13579, CC 4.0 BY-SA — https://blog.csdn.net/wuyuhao13579/article/details/109079570)
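Since runtime.malg grows with the number of goroutines ever live at once, a quick way to confirm a goroutine leak is to watch the goroutine count alongside the heap. A small monitoring sketch (a hypothetical helper, not part of the DAC source; the 10000 threshold is arbitrary):

```go
package main

import (
	"log"
	"os"
	"runtime"
	"runtime/pprof"
	"time"
)

func main() {
	for range time.Tick(time.Minute) {
		n := runtime.NumGoroutine()
		log.Printf("goroutines: %d", n)
		if n > 10000 {
			// Dump stacks grouped by creation site to see who is leaking.
			pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
		}
	}
}
```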
|||
|
|||
|
|||
|
|||
Looking for the process's logs:

The log of the problematic DAC keeps repeating:
|||
|
|||
```sh |
|||
Loss connection |
|||
``` |
|||
|
|||
This line is logged by the DAC when its MQTT connection is lost. Looking at the source:
|||
|
|||
```go
func (d *Mqtt) Connect() (err error) {

	//TODO not safe
	d.setConnStat(statInit)
	//decode

	//set opts
	opts := pahomqtt.NewClientOptions().AddBroker(d.config.URL)
	opts.SetClientID(d.config.ClientID)
	opts.SetCleanSession(d.config.CleanSessionFlag)
	opts.SetKeepAlive(time.Second * time.Duration(d.config.KeepAlive)) // 30s
	opts.SetPingTimeout(time.Second * time.Duration(d.config.KeepAlive*2))
	opts.SetConnectionLostHandler(func(c pahomqtt.Client, err error) {
		// callback fired when the MQTT connection drops
		log.Debug("[Mqtt] Loss connection, %s %v", err, d.config)
		d.terminateFlag <- true
		//d.Reconnect()
	})
}
```
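One plausible reading (an assumption on my part, not a conclusion stated in this document): `terminateFlag` is an ordinary channel send, so every lost-connection callback that fires while nothing is reading it leaves a goroutine blocked on that send, which would line up with the repeated "Loss connection" log and the runtime.malg growth above. A drop-in variant of the handler above that avoids stranding callback goroutines could look like this sketch:

```go
// Sketch only: replace the blocking send in the ConnectionLostHandler with a
// non-blocking one, so repeated disconnects cannot pile up blocked goroutines.
opts.SetConnectionLostHandler(func(c pahomqtt.Client, err error) {
	log.Debug("[Mqtt] Loss connection, %s %v", err, d.config)
	select {
	case d.terminateFlag <- true:
	default:
		// a terminate signal is already pending; drop this one
	}
})
```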
|||
|
|||
|
|||
|
|||
## Object Storage (OSS)

Alibaba Cloud OSS basic concepts: https://help.aliyun.com/document_detail/31827.html
|||
|
|||
|
|||
|