
yingshi talk

release_0.0.2
巴林闲侠 2 years ago
parent commit ccaa56c1ea
  1. code/VideoAccess-VCMP/api/app/lib/controllers/camera/create.js (20 lines changed)
  2. code/VideoAccess-VCMP/api/app/lib/utils/camera.js (4 lines changed)
  3. code/VideoAccess-VCMP/web/client/src/components/videoPlayer/audioRecoder.jsx (377 lines added, new file)
  4. code/VideoAccess-VCMP/web/client/src/components/videoPlayer/videoOperation.jsx (9 lines changed)
  5. code/VideoAccess-VCMP/web/client/src/components/videoPlayer/videoOperationHistroyTime.jsx (2 lines changed)
  6. code/VideoAccess-VCMP/web/client/src/components/videoPlayer/videoOperationTalk.jsx (85 lines changed)
  7. code/VideoAccess-VCMP/web/client/src/components/videoPlayer/videoPlay.jsx (40 lines changed)
  8. code/VideoAccess-VCMP/web/client/src/utils/func.js (23 lines changed)
  9. code/VideoAccess-VCMP/web/client/src/utils/index.js (13 lines changed)
  10. code/VideoAccess-VCMP/web/client/src/utils/videoCloudControl.js (3 lines changed)
  11. code/VideoAccess-VCMP/web/client/src/utils/videoVoice.js (61 lines added, new file)
  12. code/VideoAccess-VCMP/web/client/src/utils/webapi.js (1 line changed)
  13. code/VideoAccess-VCMP/web/config.js (7 lines changed)

code/VideoAccess-VCMP/api/app/lib/controllers/camera/create.js (20 lines changed)

@@ -104,7 +104,7 @@ async function getNvrSteam (ctx) {
   try {
     const { models } = ctx.fs.dc
     const { streamId } = ctx.query
-    const { utils: { getGbCameraLevel3ByStreamId } } = ctx.app.fs
+    const { utils: { getGbCameraLevel3ByStreamId, getPlayUrl } } = ctx.app.fs
     const nvrRes = await models.Nvr.findOne({
       where: {
@@ -132,6 +132,7 @@ async function getNvrSteam (ctx) {
       } else {
         c.dataValues.camera = null
       }
+      c.dataValues.playUrl = await getPlayUrl({ topSerialNo: streamId, serialNo: c.dataValues.streamid })
     }
     ctx.status = 200;
@@ -365,7 +366,7 @@ async function getCascadeSipList (ctx) {
       where: {
         level: 0,
         ipctype: '级联',
-        sipip: { $ne: null }
+        // sipip: { $ne: null }
       }
     })
     ctx.status = 200;
@@ -381,8 +382,8 @@ async function verifyCascadeCamera (ctx) {
   let errMsg = '校验级联摄像头信息失败'
   try {
     const { utils: { verifyCascadeInfo } } = ctx.app.fs
-    const { sip } = ctx.request.body
-    await verifyCascadeInfo({ sip })
+    const { streamId } = ctx.request.body
+    await verifyCascadeInfo({ streamId })
     ctx.status = 204;
   } catch (error) {
     ctx.fs.logger.error(`path: ${ctx.path}, error: ${error}`);
@@ -397,12 +398,12 @@ async function getCascadeSteam (ctx) {
   let errMsg = '获取级联摄像头视频流失败'
   try {
     const { models } = ctx.fs.dc
-    const { sip } = ctx.query
-    const { utils: { getGbCameraLevel3ByStreamId } } = ctx.app.fs
+    const { streamId } = ctx.query
+    const { utils: { getGbCameraLevel3ByStreamId, getPlayUrl } } = ctx.app.fs
     const cascadeRes = await models.GbCamera.findOne({
       where: {
-        sipip: sip,
+        streamid: streamId
       }
     })
     if (!cascadeRes) {
@@ -426,6 +427,7 @@ async function getCascadeSteam (ctx) {
       } else {
         c.dataValues.camera = null
       }
+      c.dataValues.playUrl = await getPlayUrl({ topSerialNo: cascadeRes.streamid, serialNo: c.dataValues.streamid })
     }
     ctx.status = 200;
@@ -471,9 +473,9 @@ async function createCascadeCamera (ctx) {
   try {
     const { models } = ctx.fs.dc
     const { userId, token } = ctx.fs.api
-    const { sip, camera = [], externalDomain, cascadeType } = ctx.request.body
+    const { streamId, camera = [], externalDomain, cascadeType } = ctx.request.body
     const { utils: { getGbCameraLevel3ByStreamId, verifyCascadeInfo } } = ctx.app.fs
-    const cameraParentRes = await verifyCascadeInfo({ sip })
+    const cameraParentRes = await verifyCascadeInfo({ streamId })
     const allCameraRes = await getGbCameraLevel3ByStreamId({ streamId: cameraParentRes.streamid })
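
getPlayUrl is pulled from ctx.app.fs.utils but is not defined anywhere in this commit; from the two call sites above it takes { topSerialNo, serialNo } and resolves to a playable stream URL for a child camera. A hypothetical sketch of that contract, with the host and path scheme assumed from the ws:// playUrlSd value that appears in videoPlay.jsx below:

// Hypothetical sketch only; the real getPlayUrl lives in api/app/lib/utils and is not shown in this diff.
async function getPlayUrl ({ topSerialNo, serialNo } = {}) {
  if (!topSerialNo || !serialNo) return null
  // Assumed URL scheme, mirroring playUrlSd: ws://<media-server>/jessica/<topSerialNo>/<serialNo>
  return `ws://221.230.55.27:8081/jessica/${topSerialNo}/${serialNo}`
}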

code/VideoAccess-VCMP/api/app/lib/utils/camera.js (4 lines changed)

@@ -81,11 +81,11 @@ module.exports = function (app, opts) {
     return gbCameraRes.dataValues
   }
-  async function verifyCascadeInfo ({ sip } = {}) {
+  async function verifyCascadeInfo ({ streamId } = {}) {
     const { models } = app.fs.dc
     const gbCameraRes = await models.GbCamera.findOne({
       where: {
-        sipip: sip,
+        streamid: streamId,
         level: 0,
         ipctype: '级联'
       }

code/VideoAccess-VCMP/web/client/src/components/videoPlayer/audioRecoder.jsx (377 lines added, new file)

@@ -0,0 +1,377 @@
import React from 'react'
import PropTypes from 'prop-types' // ES6
export const RecordState = Object.freeze({
START: 'start',
PAUSE: 'pause',
STOP: 'stop',
NONE: 'none'
})
export default class AudioReactRecorder extends React.Component {
//0 - constructor
constructor(props) {
super(props)
this.canvasRef = { current: null }
}
//TODO: add the props definitions
static propTypes = {
state: PropTypes.string,
type: PropTypes.string.isRequired,
backgroundColor: PropTypes.string,
foregroundColor: PropTypes.string,
canvasWidth: PropTypes.oneOfType([PropTypes.string, PropTypes.number]),
canvasHeight: PropTypes.oneOfType([PropTypes.string, PropTypes.number]),
//method calls
onStop: PropTypes.func
}
static defaultProps = {
state: RecordState.NONE,
type: 'audio/mpeg',
backgroundColor: 'rgb(200, 200, 200)',
foregroundColor: 'rgb(0, 0, 0)',
canvasWidth: 500,
canvasHeight: 300
}
//2 - mount
componentDidMount() {
this.init()
}
componentDidUpdate(prevProps, prevState) {
const { state } = this.props
this.checkState(prevProps.state, state)
}
checkState(previousState) {
switch (previousState) {
case RecordState.START:
this.doIfState(RecordState.PAUSE, this.pause)
this.doIfState(RecordState.STOP, this.stop)
break
case RecordState.PAUSE:
this.doIfState(RecordState.START, this.resume)
this.doIfState(RecordState.STOP, this.stop)
break
case RecordState.STOP:
this.doIfState(RecordState.START, this.start)
break
default:
this.doIfState(RecordState.START, this.start)
break
}
}
doIfState(state, cb) {
if (this.props.state == state) {
cb && cb()
}
}
  componentWillUnmount() {
    // stop the visualizer's requestAnimationFrame loop when the component goes away
    if (this.drawVisual) cancelAnimationFrame(this.drawVisual)
  }
//TODO: change to state some conditionals
init = async () => {
this.leftchannel = []
this.rightchannel = []
this.recorder = null
this.recording = false
this.recordingLength = 0
this.volume = null
this.audioInput = null
this.sampleRate = null
this.AudioContext = window.AudioContext || window.webkitAudioContext
this.context = null
this.analyser = null
this.canvas = this.canvasRef.current
this.canvasCtx = this.canvas.getContext('2d')
this.stream = null
this.tested = false
navigator.getUserMedia =
navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia
}
//get mic stream
getStream = (constraints) => {
if (!constraints) {
constraints = { audio: true, video: false }
}
return navigator.mediaDevices.getUserMedia(constraints)
}
setUpRecording = () => {
this.context = new this.AudioContext()
this.sampleRate = this.context.sampleRate
// creates a gain node
this.volume = this.context.createGain()
// creates an audio node from the microphone's incoming stream
this.audioInput = this.context.createMediaStreamSource(this.stream)
// Create analyser
this.analyser = this.context.createAnalyser()
// connect audio input to the analyser
this.audioInput.connect(this.analyser)
// connect analyser to the volume control
// analyser.connect(volume);
let bufferSize = 2048
this.recorder = this.context.createScriptProcessor(bufferSize, 2, 2)
// we connect the volume control to the processor
// volume.connect(recorder);
this.analyser.connect(this.recorder)
// finally connect the processor to the output
this.recorder.connect(this.context.destination)
const self = this
this.recorder.onaudioprocess = function (e) {
// Check
if (!self.recording) return
// Do something with the data, i.e Convert this to WAV
let left = e.inputBuffer.getChannelData(0)
let right = e.inputBuffer.getChannelData(1)
if (!self.tested) {
self.tested = true
// if this reduces to 0 we are not getting any sound
if (!left.reduce((a, b) => a + b)) {
console.log('Error: There seems to be an issue with your Mic')
// clean up;
self.stop()
self.stream.getTracks().forEach(function (track) {
track.stop()
})
self.context.close()
}
}
// we clone the samples
self.leftchannel.push(new Float32Array(left))
self.rightchannel.push(new Float32Array(right))
self.recordingLength += bufferSize
}
this.visualize()
}
mergeBuffers = (channelBuffer, recordingLength) => {
let result = new Float32Array(recordingLength)
let offset = 0
let lng = channelBuffer.length
for (let i = 0; i < lng; i++) {
let buffer = channelBuffer[i]
result.set(buffer, offset)
offset += buffer.length
}
return result
}
interleave = (leftChannel, rightChannel) => {
let length = leftChannel.length + rightChannel.length
let result = new Float32Array(length)
let inputIndex = 0
for (let index = 0; index < length;) {
result[index++] = leftChannel[inputIndex]
result[index++] = rightChannel[inputIndex]
inputIndex++
}
return result
}
writeUTFBytes = (view, offset, string) => {
let lng = string.length
for (let i = 0; i < lng; i++) {
view.setUint8(offset + i, string.charCodeAt(i))
}
}
visualize = () => {
const { backgroundColor, foregroundColor } = this.props
this.WIDTH = this.canvas.width
this.HEIGHT = this.canvas.height
this.CENTERX = this.canvas.width / 2
this.CENTERY = this.canvas.height / 2
if (!this.analyser) return
this.analyser.fftSize = 2048
const bufferLength = this.analyser.fftSize
const dataArray = new Uint8Array(bufferLength)
this.canvasCtx.clearRect(0, 0, this.WIDTH, this.HEIGHT)
//reference this using self
let self = this
const draw = function () {
self.drawVisual = requestAnimationFrame(draw)
self.analyser.getByteTimeDomainData(dataArray)
self.canvasCtx.fillStyle = backgroundColor
self.canvasCtx.fillRect(0, 0, self.WIDTH, self.HEIGHT)
self.canvasCtx.lineWidth = 2
self.canvasCtx.strokeStyle = foregroundColor
self.canvasCtx.beginPath()
var sliceWidth = (self.WIDTH * 1.0) / bufferLength
var x = 0
for (var i = 0; i < bufferLength; i++) {
var v = dataArray[i] / 128.0
var y = (v * self.HEIGHT) / 2
if (i === 0) {
self.canvasCtx.moveTo(x, y)
} else {
self.canvasCtx.lineTo(x, y)
}
x += sliceWidth
}
self.canvasCtx.lineTo(self.canvas.width, self.canvas.height / 2)
self.canvasCtx.stroke()
}
draw()
}
setupMic = async () => {
    //TODO: only get stream after clicking start
    try {
      window.stream = this.stream = await this.getStream()
      //TODO: on got stream
    } catch (err) {
      //TODO: error getting stream
      console.log('Error: Issue getting mic', err)
      return // no stream, so don't try to wire up the recording graph
    }
    this.setUpRecording()
  }
start = async () => {
await this.setupMic()
this.recording = true
// reset the buffers for the new recording
this.leftchannel.length = this.rightchannel.length = 0
this.recordingLength = 0
}
stop = () => {
const { onStop, type } = this.props
this.recording = false
this.closeMic()
// we flat the left and right channels down
this.leftBuffer = this.mergeBuffers(this.leftchannel, this.recordingLength)
this.rightBuffer = this.mergeBuffers(
this.rightchannel,
this.recordingLength
)
// we interleave both channels together
let interleaved = this.interleave(this.leftBuffer, this.rightBuffer)
///////////// WAV Encode /////////////////
// from http://typedarray.org/from-microphone-to-wav-with-getusermedia-and-web-audio/
//
// we create our wav file
let buffer = new ArrayBuffer(44 + interleaved.length * 2)
let view = new DataView(buffer)
// RIFF chunk descriptor
this.writeUTFBytes(view, 0, 'RIFF')
view.setUint32(4, 36 + interleaved.length * 2, true) // RIFF chunk size is the file size minus the 8-byte preamble
this.writeUTFBytes(view, 8, 'WAVE')
// FMT sub-chunk
this.writeUTFBytes(view, 12, 'fmt ')
view.setUint32(16, 16, true)
view.setUint16(20, 1, true)
// stereo (2 channels)
view.setUint16(22, 2, true)
view.setUint32(24, this.sampleRate, true)
view.setUint32(28, this.sampleRate * 4, true)
view.setUint16(32, 4, true)
view.setUint16(34, 16, true)
// data sub-chunk
this.writeUTFBytes(view, 36, 'data')
view.setUint32(40, interleaved.length * 2, true)
// write the PCM samples
let lng = interleaved.length
let index = 44
let volume = 1
for (let i = 0; i < lng; i++) {
view.setInt16(index, interleaved[i] * (0x7fff * volume), true)
index += 2
}
// our final binary blob
const blob = new Blob([view], { type: type })
const audioUrl = URL.createObjectURL(blob)
onStop &&
onStop({
blob: blob,
url: audioUrl,
type
})
}
pause = () => {
this.recording = false
this.closeMic()
}
resume = () => {
this.setupMic()
this.recording = true
}
closeMic = () => {
this.stream.getAudioTracks().forEach((track) => {
track.stop()
})
this.audioInput.disconnect(0)
this.analyser.disconnect(0)
this.recorder.disconnect(0)
}
//1 - render
render() {
const { canvasWidth, canvasHeight } = this.props
return (
<div className='audio-react-recorder'>
<canvas
ref={(instance) => {
this.canvasRef.current = instance
}}
width={0}
height={0}
className='audio-react-recorder__canvas'
></canvas>
</div>
)
}
}
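
A note on the header constants written in stop() above: a 44-byte PCM WAV header is fully determined by channel count, bit depth, and sample rate, and this recorder hard-codes the stereo, 16-bit case. A reference sketch of how those magic numbers derive (plain JavaScript, not part of the commit):

// Reference only: where the WAV header constants in stop() come from.
function wavHeaderFields (sampleRate, sampleCount) { // sampleCount = interleaved.length
  const numChannels = 2                                // setUint16(22, 2, true)
  const bitsPerSample = 16                             // setUint16(34, 16, true)
  const blockAlign = numChannels * (bitsPerSample / 8) // 4, as in setUint16(32, 4, true)
  const byteRate = sampleRate * blockAlign             // as in setUint32(28, sampleRate * 4, true)
  const dataSize = sampleCount * 2                     // two bytes per 16-bit sample, setUint32(40, ...)
  const riffChunkSize = 36 + dataSize                  // total file size minus the 8-byte RIFF preamble
  return { numChannels, bitsPerSample, blockAlign, byteRate, dataSize, riffChunkSize }
}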

code/VideoAccess-VCMP/web/client/src/components/videoPlayer/videoOperation.jsx (9 lines changed)

@@ -13,6 +13,7 @@ import './videoPlay.less';
 const timeFormat = 'YYYY-MM-DD HH:mm:ss'
 const VideoOperation = ({
   ToastInCustom,
+  operationState, operation,
   voiceDisY, setVoiceDisY,
   processDisX, setProcessDisX,
@@ -52,12 +53,16 @@ const VideoOperation = ({
         operationState.control.select ?
           <VideoOperationCloudControl videoObj={videoObj} /> :
           operationState.talk.select ?
-            <VideoOperationTalk /> :
+            <VideoOperationTalk
+              videoObj={videoObj}
+            /> :
             '' : ''
       }
       {
         showTimeSelect ?
-          <VideoOperationHistroyTime close={() => { setShowTimeSelect(false) }} histroyTime={histroyTime} setHistroyTime={setHistroyTime} setProcessDisX={setProcessDisX} />
+          <VideoOperationHistroyTime
+            close={() => { setShowTimeSelect(false) }} histroyTime={histroyTime} setHistroyTime={setHistroyTime} setProcessDisX={setProcessDisX}
+          />
           : ''
       }
       {/* bottom controls */}

code/VideoAccess-VCMP/web/client/src/components/videoPlayer/videoOperationHistroyTime.jsx (2 lines changed)

@@ -54,7 +54,7 @@ const VideoOperationHistroyTime = ({ close, histroyTime, setHistroyTime, setProc
       </div>
       <div style={{ display: 'flex', alignItems: 'center', justifyContent: 'space-between', }}>
         <span style={{ color: '#FF7100' }}>
-          <img src="/assets/images/background/warning.png" height={14} style={{ position: 'relative', top: 2, marginRight: 2 }} />
+          <img src={`/assets/images/background/warning.png`} height={14} style={{ position: 'relative', top: 2, marginRight: 2 }} />
           最长时间跨度不超过72小时
         </span>
         <span>

code/VideoAccess-VCMP/web/client/src/components/videoPlayer/videoOperationTalk.jsx (85 lines changed)

@@ -1,19 +1,92 @@
 import React, { useState, useEffect, useRef } from "react";
 import { connect } from "react-redux";
 import request from 'superagent'
 import './videoPlay.less';
 import { DatePicker, Toast, ToastFactory, Space } from '@douyinfe/semi-ui';
+import { checkAudioVideo, uploadVoice2Yingshi, sendVoice2YingshiCamera } from '$utils';
+import AudioRecoder, { RecordState } from "./audioRecoder"
-const VideoOperationTalk = ({ }) => {
+const VideoOperationTalk = ({
+  videoObj,
+}) => {
+  const [recordState, setRecordState] = useState(RecordState.NONE)
   const ToastInCustomContainer = ToastFactory.create({
     getPopupContainer: () => document.getElementById('vcmp_videoplay'),
   });
   useEffect(() => {
   }, [])
+  const startTalk = () => {
+    setRecordState(RecordState.START)
+  }
+  const stopTalk = () => {
+    setRecordState(RecordState.STOP)
+  }
+  const onStopTalk = async (data) => {
+    console.log('stopTalk', data.blob)
+    setRecordState(RecordState.STOP)
+    const { blob: audioData } = data;
+    if (!audioData) return;
+    let buffer = await audioData.arrayBuffer();
+    let file = new File([buffer], Date.now() + "", {
+      type: "audio/mpeg"
+    });
+    try {
+      let uploadRes = await uploadVoice2Yingshi({ voiceFile: file, accessToken: videoObj.yingshiToken, })
+      const { url } = uploadRes
+      let sendRes = await sendVoice2YingshiCamera({
+        accessToken: videoObj.yingshiToken,
+        deviceSerial: videoObj.serialNo,
+        channelNo: videoObj.channelNo,
+        fileUrl: url
+      });
+      if (sendRes && sendRes.code == 200) {
+        ToastInCustomContainer.success('已发送');
+      } else {
+        console.log(sendRes)
+      }
+    } catch (error) {
+      console.error(error);
+    }
+  }
   return (
     <div style={{
       position: 'absolute', top: 'calc(50% - 88px)', left: 'calc(50% - 156px)',
-      width: 312, height: 176, backgroundColor: '#000000A5',
+      width: 312, height: 186, backgroundColor: '#000000A5',
     }}>
-      <img src="/assets/images/background/talking.png" style={{ display: 'block', margin: '12px auto' }} />
-      <div style={{
+      <img src={`/assets/images/background/${recordState == RecordState.START ? 'talking' : 'talk'}.png`} style={{ display: 'block', margin: '12px auto' }} />
+      <div
+        style={{
           height: 32, width: 88, textAlign: 'center', margin: 'auto', color: '#fff', backgroundColor: '#1859C1',
-          lineHeight: '32px'
-        }}>开始讲话</div>
+          lineHeight: '32px', cursor: 'pointer'
+        }}
+        onClick={() => {
+          checkAudioVideo({ audio: true }).then(res => {
+            // console.log(',');
+            if (recordState === RecordState.START) {
+              stopTalk()
+            } else {
+              startTalk()
+            }
+          }).catch(err => {
+            ToastInCustomContainer.destroyAll()
+            if (err.code && err.code == 404) {
+              ToastInCustomContainer.error("浏览器不支持")
+            } else {
+              ToastInCustomContainer.error("请检查是否存在麦克风,或是否禁用麦克风")
+            }
+          })
+        }}
+      >{recordState == RecordState.START ? '结束' : '开始'}讲话</div>
+      <AudioRecoder state={recordState} onStop={onStopTalk} />
     </div>
   )
 }

code/VideoAccess-VCMP/web/client/src/components/videoPlayer/videoPlay.jsx (40 lines changed)

@@ -3,7 +3,8 @@ import { connect } from "react-redux";
 import screenfull from 'screenfull';
 import moment from "moment";
 import request from 'superagent'
-import { VideoServeApi, IotVideoServerRequest } from '$utils'
+import { VideoServeApi, IotVideoServerRequest, checkAudioVideo } from '$utils'
+import { ToastFactory, } from '@douyinfe/semi-ui';
 import VideoHeader from './voiceHeader'
 import VideoOperation from './videoOperation'
 import './videoPlay.less';
@@ -20,28 +21,28 @@ const VideoPlay = ({
   // videoObj,
-  // videoObj = {
-  //   type: 'yingshi',
-  //   audio: false,
-  //   serialNo: 'G75922040', //
-  //   channelNo: 1, //
-  //   yingshiToken: 'at.3j6eyqbn0g5xvcut73v0rzdu1nh0gnxx-4ua03m82o9-12u1t9g-rtzndpyds', //
-  //   playUrlSd: 'ezopen://open.ys7.com/G75922040/1.live', //
-  //   // playUrl: 'ws://221.230.55.27:8081/jessica/34020000001110000077/34020000001310000003',
-  //   playUrlHd: 'ezopen://open.ys7.com/G75922040/1.hd.live',
-  //   // replayUrl: 'ezopen://open.ys7.com/G75922040/1.rec',
-  // },
   videoObj = {
-    type: 'cascade',
+    type: 'yingshi',
     audio: false,
-    serialNo: '34020000001310000001', //
-    topSerialNo: '34020000001110000077', //
-    playUrlSd: 'ws://221.230.55.27:8081/jessica/34020000001110000077/34020000001310000001', //
-    // playUrlHd: 'ezopen://open.ys7.com/G75922040/1.hd.live',
+    serialNo: 'G75922040', //
+    channelNo: 1, //
+    yingshiToken: 'at.3j6eyqbn0g5xvcut73v0rzdu1nh0gnxx-4ua03m82o9-12u1t9g-rtzndpyds', //
+    playUrlSd: 'ezopen://open.ys7.com/G75922040/1.live', //
+    // playUrl: 'ws://221.230.55.27:8081/jessica/34020000001110000077/34020000001310000003',
+    playUrlHd: 'ezopen://open.ys7.com/G75922040/1.hd.live',
+    // replayUrl: 'ezopen://open.ys7.com/G75922040/1.rec',
   },
+  // videoObj = {
+  //   type: 'cascade',
+  //   audio: false,
+  //   serialNo: '34020000001310000001', //
+  //   topSerialNo: '34020000001110000077', //
+  //   playUrlSd: 'ws://221.230.55.27:8081/jessica/34020000001110000077/34020000001310000001', //
+  //   // playUrlHd: 'ezopen://open.ys7.com/G75922040/1.hd.live',
+  //   // replayUrl: 'ezopen://open.ys7.com/G75922040/1.rec',
+  // },
   //
   iotVideoServer
 }) => {
@@ -90,6 +91,7 @@ const VideoPlay = ({
     setoperationState(nextOperationState)
   }
+  //
   const operation = [{
     key: 'control',
     click: () => {
@@ -97,6 +99,7 @@
     }
   }, {
     key: 'talk',
+    hide: !(videoObj.type == 'yingshi'),
     click: () => {
       changeSelectState('talk')
     }
@@ -302,6 +305,7 @@
       }
     }
   return (
+    <>
     <div className="vcmp_videoplay" style={{ height: height || '100%', width: width || '100%', overflow: 'hidden' }}>

code/VideoAccess-VCMP/web/client/src/utils/func.js (23 lines changed)

@@ -8,3 +8,26 @@ export const isAuthorized = (authcode) => {
     return false;
   }
 }
+export const checkAudioVideo = (constraintsData) => {
+  if (navigator.mediaDevices === undefined) {
+    navigator.mediaDevices = {};
+  }
+  if (navigator.mediaDevices.getUserMedia === undefined) {
+    navigator.mediaDevices.getUserMedia = function (constraints) {
+      // grab the legacy, prefixed getUserMedia if the browser has one
+      var getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;
+      // some browsers never implemented it at all; reject the promise so callers get a uniform interface
+      if (!getUserMedia) {
+        return Promise.reject({ code: 404 });
+      }
+      // otherwise wrap the old callback-style navigator.getUserMedia in a Promise
+      return new Promise(function (resolve, reject) {
+        getUserMedia.call(navigator, constraints, resolve, reject);
+      });
+    }
+  }
+  return navigator.mediaDevices.getUserMedia(constraintsData)
+}
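
A quick usage sketch (hypothetical caller, not part of this commit): the helper resolves with a MediaStream once permission is granted, and rejects with { code: 404 } when no getUserMedia implementation exists at all, which is exactly the error code videoOperationTalk.jsx branches on:

// Hypothetical caller, mirroring the pattern used in videoOperationTalk.jsx.
import { checkAudioVideo } from '$utils'

checkAudioVideo({ audio: true })
  .then((stream) => {
    // permission granted; stop the probe tracks, the recorder opens its own stream later
    stream.getTracks().forEach((track) => track.stop())
  })
  .catch((err) => {
    if (err && err.code == 404) {
      console.log('this browser has no getUserMedia implementation')
    } else {
      console.log('microphone missing or access denied', err)
    }
  })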

code/VideoAccess-VCMP/web/client/src/utils/index.js (13 lines changed)

@@ -1,19 +1,22 @@
 'use strict';
-import { isAuthorized } from './func';
+import { isAuthorized, checkAudioVideo } from './func';
 import { AuthorizationCode } from './authCode';
 import {
   ApiTable, VideoServeApi,
   RouteTable,
-  AuthRequest, AxyRequest, IotVideoServerRequest
+  AuthRequest, AxyRequest, IotVideoServerRequest, YingshiRequest
 } from './webapi'
 import {
   YS_PTZ_DIRECTION, ysptz,
   gbptz
 } from './videoCloudControl';
+import {
+  uploadVoice2Yingshi, sendVoice2YingshiCamera
+} from './videoVoice';
 export {
   isAuthorized,
+  checkAudioVideo,
   AuthorizationCode,
   ApiTable,
@@ -22,9 +25,13 @@ export {
   AuthRequest,
   AxyRequest,
   IotVideoServerRequest,
+  YingshiRequest,
   YS_PTZ_DIRECTION,
   ysptz,
+  uploadVoice2Yingshi,
+  sendVoice2YingshiCamera,
   gbptz,
 }

code/VideoAccess-VCMP/web/client/src/utils/videoCloudControl.js (3 lines changed)

@@ -28,7 +28,8 @@ export function ysptz (ac, { serialNo, yingshiToken, channelNo }) {
   }
   let startReqBody = Object.assign({}, params, { speed: 1 })
   let stopReqBody = params
-  let requestUrl = `https://open.ys7.com/api/lapp/device/ptz/`
+  // let requestUrl = `https://open.ys7.com/api/lapp/device/ptz/`
+  let requestUrl = `/_yingshi/api/lapp/device/ptz/`
   superagent
     .post(requestUrl + 'start')
     .send(startReqBody)

code/VideoAccess-VCMP/web/client/src/utils/videoVoice.js (61 lines added, new file)

@@ -0,0 +1,61 @@
'use strict';
import superagent from "superagent"
import moment from "moment";
import { YingshiRequest } from './'
export const uploadVoice2Yingshi = async ({ voiceFile, accessToken, voiceName, }) => {
const fData = new FormData();
fData.append('voiceFile', voiceFile);
fData.append("accessToken", accessToken)
fData.append("voiceName", voiceName || moment().format("YYYYMMDDHHmmssSSS"))
fData.append("force", true)
console.log(fData);
// TODO: figure out why proxy forwarding doesn't work here
// const upRslt = await YingshiRequest.post('api/lapp/voice/upload', fData)
const upRslt = await new Promise((resolve, reject) => {
superagent
//.post('/_yingshi/api/lapp/voice/upload')
.post('https://open.ys7.com/api/lapp/voice/upload')
.send(fData)
.end(function (err, res) {
if (err) {
reject(err)
} else {
resolve(res.body)
}
})
})
if (upRslt.code == 200) {
return upRslt.data
} else {
throw upRslt
}
}
export const sendVoice2YingshiCamera = async ({ accessToken, deviceSerial, channelNo = 1, fileUrl }) => {
const sendRslt = await new Promise((resolve, reject) => {
superagent
.post('https://open.ys7.com/api/lapp/voice/send')
.send({
accessToken, deviceSerial, channelNo, fileUrl
})
.set('Content-Type', 'application/x-www-form-urlencoded')
.end(function (err, res) {
if (err) {
reject(err)
} else {
resolve(res.body)
}
})
})
console.log(sendRslt);
if (sendRslt.code === 200) {
return sendRslt.data
} else {
throw sendRslt
}
}
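
Taken together with audioRecoder.jsx, the intended flow (as wired up in videoOperationTalk.jsx) is: record to a WAV Blob, wrap it in a File, upload it to Yingshi to get a hosted fileUrl, then tell the camera to play that file. A condensed sketch of the whole chain (hypothetical helper, mirroring onStopTalk):

// Hypothetical end-to-end helper, mirroring onStopTalk in videoOperationTalk.jsx.
import { uploadVoice2Yingshi, sendVoice2YingshiCamera } from '$utils'

async function pushVoiceToCamera (wavBlob, videoObj) {
  const buffer = await wavBlob.arrayBuffer()
  const file = new File([buffer], Date.now() + '', { type: 'audio/mpeg' })
  const { url } = await uploadVoice2Yingshi({ voiceFile: file, accessToken: videoObj.yingshiToken })
  return sendVoice2YingshiCamera({
    accessToken: videoObj.yingshiToken,
    deviceSerial: videoObj.serialNo,
    channelNo: videoObj.channelNo,
    fileUrl: url
  })
}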

code/VideoAccess-VCMP/web/client/src/utils/webapi.js (1 line changed)

@@ -4,6 +4,7 @@ import { ProxyRequest } from "@peace/utils";
 export const AuthRequest = new ProxyRequest("_auth");
 export const AxyRequest = new ProxyRequest("_axy");
 export const IotVideoServerRequest = new ProxyRequest("_vs");
+export const YingshiRequest = new ProxyRequest("_yingshi");
 export const ApiTable = {
   login: "login",

code/VideoAccess-VCMP/web/config.js (7 lines changed)

@@ -58,6 +58,13 @@ const product = {
       host: IOT_VIDEO_SERVER,
       match: /^\/_vs\//,
     }
+  }, {
+    // TODO: not usable yet
+    entry: require('./middlewares/proxy').entry,
+    opts: {
+      host: 'https://open.ys7.com',
+      match: /^\/_yingshi\//,
+    }
   }, {
     entry: require('./routes').entry,
     opts: {
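
Until this proxy entry works, the client keeps both URL styles side by side: videoCloudControl.js already posts to the proxied /_yingshi/... path, while videoVoice.js still hits https://open.ys7.com directly and leaves the proxied variant commented out. Once the proxy is fixed, each call site should only need a one-line switch along these lines (hypothetical):

// Hypothetical: route the voice upload through the local proxy instead of hitting ys7 directly.
import superagent from 'superagent'

function uploadViaProxy (fData) {
  return superagent
    .post('/_yingshi/api/lapp/voice/upload') // rewritten to https://open.ys7.com by the entry above
    .send(fData)
}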
