
Vue: Calling the Camera and Baidu AI Face Detection to Recognize Facial Emotions


The complete page code is at the end of this post.
A quick note up front: this example uses axios and the loading component from element-ui.

First, take a look at what the finished page looks like.

Note: the page opens the camera and detects faces; the description text is built from the information the API returns, and the small images on the right are cropped out of the frame using the face position and size that the API reports.

Before anything else, apply for the Baidu AI face detection API and obtain an API key and secret; they are exchanged for the access_token used in every later request.
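As a reference, here is a minimal sketch of exchanging the key and secret for an access_token with axios. The endpoint and parameter names below follow Baidu's OAuth flow, but double-check them against the current Baidu AI documentation before relying on them.

import axios from "axios";

// Minimal sketch: exchange the API key and secret for an access_token.
// apiKey and secretKey are the values from your Baidu AI console.
async function getAccessToken(apiKey, secretKey) {
  const res = await axios.post("https://aip.baidubce.com/oauth/2.0/token", null, {
    params: {
      grant_type: "client_credentials",
      client_id: apiKey,
      client_secret: secretKey
    }
  });
  // The token is then appended to the detect request as ?access_token=...
  return res.data.access_token;
}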

Step 1: Write the HTML

<template>
  <div class="main">
    <div class="cam">
      <div class="video-box">
        <div class="videos">
          <video id="video" style="width: 100%; height: 100%; object-fit: fill;" preload autoplay loop muted></video>
        </div>
        <div class="button-box" @click="submit()">
          <img src="../assets/cam.png" />
        </div>
      </div>
      <div class="title-box">识别分析</div>
      <div class="right-box">
        <div
          ref="mainscroll"
          class="face-box"
          v-loading="loading"
          element-loading-text="拼命加载中"
          element-loading-spinner="el-icon-loading"
          element-loading-background="transparent"
        >
          <div class="details" v-for="(item, index) in threeImageArray" :key="index">
            <div class="image-box">
              <img :src="item.image" />
            </div>
            <div class="list-box">
              <div class="sex-one">年龄:{{ item.age }}</div>
            </div>
          </div>
        </div>
      </div>
      <div class="canva-box">
        <canvas ref="canvas" id="canvas" width="1000" height="700"></canvas>
      </div>
    </div>
  </div>
</template>

Note: since the requirement was to show only the camera feed, I placed the canvas that holds the snapshot underneath the video layer (via z-index); adjust the CSS to fit your own layout.

Step 2: Write the CSS

<style scoped>
.main { width: 100%; height: 100vh; display: flex; align-items: center; justify-content: center; }
.cam { width: 50%; height: 90%; display: flex; flex-direction: column; align-items: center; background-color: #fff; border-radius: 20px; }
.canva-box { width: 100%; display: flex; align-items: center; justify-content: center; position: absolute; top: 0; z-index: -99; }
.button-box { position: absolute; top: 35%; border: 2px solid #fff; padding: calc(100vw * 20 / 1920); border-radius: 50%; }
.button-box img { width: calc(100vw * 40 / 1920); height: calc(100vw * 40 / 1920); }
.title-box { height: 5%; font-size: calc(100vw * 40 / 1920); font-weight: bold; width: 40%; }
.right-box { width: 100%; height: 30%; display: flex; flex-direction: column; align-items: center; justify-content: space-evenly; }
.video-box { width: 100%; height: 65%; display: flex; justify-content: center; }
.videos { width: 100%; height: 100%; }
.face-box { width: 100%; height: 100%; display: flex; flex-direction: column; align-items: center; overflow: auto; }
.details { display: flex; width: 100%; height: 90%; margin-top: 4px; }
.image-box { width: 30%; height: 100%; display: flex; justify-content: center; }
.image-box img { width: calc(100vw * 150 / 1920); height: calc(100vw * 150 / 1920); }
.list-box { width: 60%; height: calc(100vw * 300 / 1920); display: flex; flex-direction: column; text-indent: calc(100vw * 16 / 1920); font-size: calc(100vw * 18 / 1920); padding-bottom: 20px; }
</style>

Step 3: Install axios: npm i axios

Step 4: Install element-ui

npm i element-ui

// import and register
import ElementUI from "element-ui";
import "element-ui/lib/theme-chalk/index.css";

// import echarts
import * as echarts from "echarts";
Vue.prototype.$echarts = echarts;
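For completeness, here is a minimal main.js sketch showing how these pieces could be wired together, assuming a standard Vue 2 project created with Vue CLI and an App.vue root component; Vue.use(ElementUI) is what registers the v-loading directive used in the template.

// main.js — minimal wiring sketch (assumes Vue 2 with a Vue CLI project layout)
import Vue from "vue";
import App from "./App.vue";
import ElementUI from "element-ui";
import "element-ui/lib/theme-chalk/index.css";
import * as echarts from "echarts";

Vue.use(ElementUI);               // registers element-ui components and the v-loading directive
Vue.prototype.$echarts = echarts; // echarts is not used on this page, but matches the setup above

new Vue({
  render: h => h(App)
}).$mount("#app");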

Step 5: The JavaScript

First, open the camera (I adapted this part from an article I found online, though I no longer remember which one).

// Can be triggered by a click or called from mounted(), depending on your needs
getCompetence() {
  var _this = this;
  this.thisCanvas = document.getElementById("canvas");
  this.thisContext = this.thisCanvas.getContext("2d");
  this.thisVideo = document.getElementById("video");
  // Older browsers may not support mediaDevices at all, so start with an empty object
  if (navigator.mediaDevices === undefined) {
    navigator.mediaDevices = {};
  }
  // Some browsers implement mediaDevices only partially, so we cannot simply assign
  // an object containing getUserMedia, because that would overwrite existing properties.
  // Here we add getUserMedia only if it is missing.
  if (navigator.mediaDevices.getUserMedia === undefined) {
    navigator.mediaDevices.getUserMedia = function(constraints) {
      // Grab the legacy getUserMedia, if it exists
      var getUserMedia =
        navigator.webkitGetUserMedia ||
        navigator.mozGetUserMedia ||
        navigator.getUserMedia;
      // Some browsers do not support it at all; keep the interface consistent
      // by returning a rejected Promise
      if (!getUserMedia) {
        return Promise.reject(
          new Error("getUserMedia is not implemented in this browser")
        );
      }
      // Otherwise, wrap the call to the legacy navigator.getUserMedia in a Promise
      return new Promise(function(resolve, reject) {
        getUserMedia.call(navigator, constraints, resolve, reject);
      });
    };
  }
  var constraints = {
    audio: false,
    video: {
      width: this.videoWidth,
      height: this.videoHeight,
      transform: "scaleX(-1)"
    }
  };
  navigator.mediaDevices
    .getUserMedia(constraints)
    .then(function(stream) {
      // Older browsers may not have srcObject
      if ("srcObject" in _this.thisVideo) {
        _this.thisVideo.srcObject = stream;
      } else {
        // Avoid this path in modern browsers, as it is deprecated
        _this.thisVideo.src = window.URL.createObjectURL(stream);
      }
      _this.thisVideo.onloadedmetadata = function(e) {
        _this.thisVideo.play();
      };
    })
    .catch(err => {
      console.log(err);
    });
},

Remember to release the camera when the component is destroyed!

// Close the camera in the destroyed() lifecycle hook by stopping the stream's tracks
destroyed() {
  const stream = this.thisVideo && this.thisVideo.srcObject;
  if (stream) {
    stream.getTracks().forEach(track => track.stop());
  }
}

Next, write the part that calls the Baidu AI API.
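For orientation, here is an abridged sketch of a successful response, limited to the fields the code below actually reads; the real response contains more fields (including the data for every requested face_field), so consult the Baidu Face Detect V3 documentation for the full schema.

// Abridged response sketch (only the fields used below; the values are made up):
// {
//   error_code: 0,
//   result: {
//     face_list: [
//       {
//         age: 25,
//         location: { left: 312.5, top: 180.2, width: 150, height: 150 }
//         // ...plus data for the requested face_field values:
//         // gender, emotion, glasses, mask, expression
//       }
//     ]
//   }
// }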

async detectFace(imageData) {
  // The access_token is obtained with the API key and secret you applied for;
  // the Baidu AI documentation explains how
  this.loading = true;
  try {
    const response = await axios.post(
      "/rest/2.0/face/v3/detect?access_token=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
      {
        image: imageData,
        image_type: "BASE64",
        face_field: "age,gender,emotion,glasses,mask,expression",
        max_face_num: 10
      }
    );
    // Handle the result returned by the API
    if (response.data.error_code == 0) {
      // Log the successful response
      console.log("调用成功显示结果", response);
      this.faceInfomation = response.data.result;
      // Keep the returned face info in an array
      this.faceArray = response.data.result.face_list;
      this.faceArray.forEach((item, i) => {
        console.log("item", item);
        // Position and size of this face in the frame
        const left = parseInt(item.location.left);
        const top = parseInt(item.location.top);
        const width = parseInt(item.location.width);
        const height = parseInt(item.location.height);
        const faceCanvas = document.createElement("canvas");
        faceCanvas.width = width;
        faceCanvas.height = height;
        // Draw the cropped face onto its own canvas so it can be displayed
        const faceCtx = faceCanvas.getContext("2d");
        faceCtx.drawImage(this.canvas, left, top, width, height, 0, 0, width, height);
        this.faceImg = faceCanvas.toDataURL("image/png");
        // Collect the cropped avatars in a new array
        this.newImageArray.push(this.faceImg);
      });
      // The cropped avatars and the face descriptions live in different arrays,
      // so merge them into one array to make the v-for list (and its CSS) simpler
      this.threeImageArray = this.faceArray.map((item, index) => {
        return { ...item, image: this.newImageArray[index] };
      });
    }
  } catch (error) {
    console.error(error);
  } finally {
    this.loading = false;
  }
},
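Note that detectFace posts to a relative path (/rest/2.0/face/v3/detect), so during development those requests need to be proxied to Baidu's host, which also sidesteps CORS. A minimal sketch, assuming a Vue CLI project with a vue.config.js:

// vue.config.js — dev-server proxy sketch (assumes Vue CLI; adjust to your setup)
module.exports = {
  devServer: {
    proxy: {
      "/rest": {
        target: "https://aip.baidubce.com", // host of the Baidu AI REST API
        changeOrigin: true                  // send the target host in the Host header
      }
    }
  }
};

In production you would normally route the call through your own backend so the access_token is not exposed in the browser.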

If anything is unclear, leave a comment and I'll reply, though I can't promise my answers will always be right, haha.

After the camera frame is drawn onto the canvas, convert it into the format the API accepts (a base64 string without the data-URL prefix).

submit() {
  console.log("点击了这个");
  // Clear the results of the previous run on every click
  this.newImageArray = [];
  this.faceArray = [];
  // Show the loading state briefly
  this.fullscreenLoading = true;
  setTimeout(() => {
    this.fullscreenLoading = false;
  }, 2000);
  let canvas = document.getElementById("canvas");
  let context = canvas.getContext("2d");
  let video = document.getElementById("video");
  // Draw the current camera frame onto the canvas
  context.drawImage(video, 0, 0, 1000, 700);
  // Convert the image into the format the API needs
  canvas.toBlob(blob => {
    var reader = new FileReader();
    reader.onloadend = () => {
      this.imageData = reader.result;
      // Strip the data-URL prefix to get the raw base64 string
      this.newimage = this.imageData.replace(/^data:image\/\w+;base64,/, "");
      // Keep a short slice of the data URL
      this.headerimg = this.imageData.slice(5, 15);
      this.imageArray.push(this.headerimg);
      // Call the detection method
      this.detectFace(this.newimage);
    };
    reader.readAsDataURL(blob);
  });
},
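As a side note, the toBlob + FileReader round trip could be replaced with canvas.toDataURL, which returns the data URL synchronously; a sketch of the same conversion under that assumption:

// Synchronous alternative: toDataURL returns the data URL directly
const dataUrl = canvas.toDataURL("image/png");
// Strip the "data:image/...;base64," prefix before sending it to the API
const base64 = dataUrl.replace(/^data:image\/\w+;base64,/, "");
this.detectFace(base64);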

Complete code

<template>
  <div class="main">
    <div class="cam">
      <div class="video-box">
        <div class="videos">
          <video id="video" style="width: 100%; height: 100%; object-fit: fill;" preload autoplay loop muted></video>
        </div>
        <div class="button-box" @click="submit()">
          <img src="../assets/cam.png" />
        </div>
      </div>
      <div class="title-box">识别分析</div>
      <div class="right-box">
        <div
          ref="mainscroll"
          class="face-box"
          v-loading="loading"
          element-loading-text="拼命加载中"
          element-loading-spinner="el-icon-loading"
          element-loading-background="transparent"
        >
          <div class="details" v-for="(item, index) in threeImageArray" :key="index">
            <div class="image-box">
              <img :src="item.image" />
            </div>
            <div class="list-box">
              <div class="sex-one">年龄:{{ item.age }}</div>
            </div>
          </div>
        </div>
      </div>
      <div class="canva-box">
        <canvas ref="canvas" id="canvas" width="1000" height="700"></canvas>
      </div>
    </div>
  </div>
</template>

<script>
import axios from "axios";

export default {
  name: "testTracking",
  data() {
    return {
      loading: false,
      contentAnwer: false,
      contentShow: true,
      API_KEY: "your API key goes here",
      SECRET_KEY: "your secret key goes here",
      imageData: null,
      newimage: null,
      faceInfomation: {},
      faceArray: [],
      age: null,
      imageArray: [],
      headerimg: "",
      location: {},
      left: 0,
      top: 0,
      width: 0,
      height: 0,
      faceImg: null,
      canvas: null,
      newImageArray: [],
      threeImageArray: [],
      // referenced by submit() and getCompetence() below
      fullscreenLoading: false,
      videoWidth: 1000,
      videoHeight: 700
    };
  },
  methods: {
    submit() {
      console.log("点击了这个");
      // Clear the results of the previous run on every click
      this.newImageArray = [];
      this.faceArray = [];
      // Show the loading state briefly
      this.fullscreenLoading = true;
      setTimeout(() => {
        this.fullscreenLoading = false;
      }, 2000);
      let canvas = document.getElementById("canvas");
      let context = canvas.getContext("2d");
      let video = document.getElementById("video");
      // Draw the current camera frame onto the canvas
      context.drawImage(video, 0, 0, 1000, 700);
      // Convert the image into the format the API needs
      canvas.toBlob(blob => {
        var reader = new FileReader();
        reader.onloadend = () => {
          this.imageData = reader.result;
          // Strip the data-URL prefix to get the raw base64 string
          this.newimage = this.imageData.replace(/^data:image\/\w+;base64,/, "");
          // Keep a short slice of the data URL
          this.headerimg = this.imageData.slice(5, 15);
          this.imageArray.push(this.headerimg);
          // Call the detection method
          this.detectFace(this.newimage);
        };
        reader.readAsDataURL(blob);
      });
    },
    // Can be triggered by a click or called from mounted(), depending on your needs
    getCompetence() {
      var _this = this;
      this.thisCanvas = document.getElementById("canvas");
      this.thisContext = this.thisCanvas.getContext("2d");
      this.thisVideo = document.getElementById("video");
      // Older browsers may not support mediaDevices at all, so start with an empty object
      if (navigator.mediaDevices === undefined) {
        navigator.mediaDevices = {};
      }
      // Some browsers implement mediaDevices only partially, so we cannot simply assign
      // an object containing getUserMedia, because that would overwrite existing properties.
      // Here we add getUserMedia only if it is missing.
      if (navigator.mediaDevices.getUserMedia === undefined) {
        navigator.mediaDevices.getUserMedia = function(constraints) {
          // Grab the legacy getUserMedia, if it exists
          var getUserMedia =
            navigator.webkitGetUserMedia ||
            navigator.mozGetUserMedia ||
            navigator.getUserMedia;
          // Some browsers do not support it at all; keep the interface consistent
          // by returning a rejected Promise
          if (!getUserMedia) {
            return Promise.reject(
              new Error("getUserMedia is not implemented in this browser")
            );
          }
          // Otherwise, wrap the call to the legacy navigator.getUserMedia in a Promise
          return new Promise(function(resolve, reject) {
            getUserMedia.call(navigator, constraints, resolve, reject);
          });
        };
      }
      var constraints = {
        audio: false,
        video: {
          width: this.videoWidth,
          height: this.videoHeight,
          transform: "scaleX(-1)"
        }
      };
      navigator.mediaDevices
        .getUserMedia(constraints)
        .then(function(stream) {
          // Older browsers may not have srcObject
          if ("srcObject" in _this.thisVideo) {
            _this.thisVideo.srcObject = stream;
          } else {
            // Avoid this path in modern browsers, as it is deprecated
            _this.thisVideo.src = window.URL.createObjectURL(stream);
          }
          _this.thisVideo.onloadedmetadata = function(e) {
            _this.thisVideo.play();
          };
        })
        .catch(err => {
          console.log(err);
        });
    },
    async detectFace(imageData) {
      // The access_token is obtained with the API key and secret you applied for;
      // the Baidu AI documentation explains how
      this.loading = true;
      try {
        const response = await axios.post(
          "/rest/2.0/face/v3/detect?access_token=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
          {
            image: imageData,
            image_type: "BASE64",
            face_field: "age,gender,emotion,glasses,mask,expression",
            max_face_num: 10
          }
        );
        // Handle the result returned by the API
        if (response.data.error_code == 0) {
          // Log the successful response
          console.log("调用成功显示结果", response);
          this.faceInfomation = response.data.result;
          // Keep the returned face info in an array
          this.faceArray = response.data.result.face_list;
          this.faceArray.forEach((item, i) => {
            console.log("item", item);
            // Position and size of this face in the frame
            const left = parseInt(item.location.left);
            const top = parseInt(item.location.top);
            const width = parseInt(item.location.width);
            const height = parseInt(item.location.height);
            const faceCanvas = document.createElement("canvas");
            faceCanvas.width = width;
            faceCanvas.height = height;
            // Draw the cropped face onto its own canvas so it can be displayed
            const faceCtx = faceCanvas.getContext("2d");
            faceCtx.drawImage(this.canvas, left, top, width, height, 0, 0, width, height);
            this.faceImg = faceCanvas.toDataURL("image/png");
            // Collect the cropped avatars in a new array
            this.newImageArray.push(this.faceImg);
          });
          // The cropped avatars and the face descriptions live in different arrays,
          // so merge them into one array to make the v-for list (and its CSS) simpler
          this.threeImageArray = this.faceArray.map((item, index) => {
            return { ...item, image: this.newImageArray[index] };
          });
        }
      } catch (error) {
        console.error(error);
      } finally {
        this.loading = false;
      }
    }
  },
  mounted() {
    this.getCompetence();
    this.canvas = this.$refs.canvas;
  },
  computed: {},
  destroyed() {
    // Close the camera by stopping the stream's tracks
    const stream = this.thisVideo && this.thisVideo.srcObject;
    if (stream) {
      stream.getTracks().forEach(track => track.stop());
    }
  }
};
</script>

<style scoped>
.main { width: 100%; height: 100vh; display: flex; align-items: center; justify-content: center; }
.cam { width: 50%; height: 90%; display: flex; flex-direction: column; align-items: center; background-color: #fff; border-radius: 20px; }
.canva-box { width: 100%; display: flex; align-items: center; justify-content: center; position: absolute; top: 0; z-index: -99; }
.button-box { position: absolute; top: 35%; border: 2px solid #fff; padding: calc(100vw * 20 / 1920); border-radius: 50%; }
.button-box img { width: calc(100vw * 40 / 1920); height: calc(100vw * 40 / 1920); }
.title-box { height: 5%; font-size: calc(100vw * 40 / 1920); font-weight: bold; width: 40%; }
.right-box { width: 100%; height: 30%; display: flex; flex-direction: column; align-items: center; justify-content: space-evenly; }
.video-box { width: 100%; height: 65%; display: flex; justify-content: center; }
.videos { width: 100%; height: 100%; }
.face-box { width: 100%; height: 100%; display: flex; flex-direction: column; align-items: center; overflow: auto; }
.details { display: flex; width: 100%; height: 90%; margin-top: 4px; }
.image-box { width: 30%; height: 100%; display: flex; justify-content: center; }
.image-box img { width: calc(100vw * 150 / 1920); height: calc(100vw * 150 / 1920); }
.list-box { width: 60%; height: calc(100vw * 300 / 1920); display: flex; flex-direction: column; text-indent: calc(100vw * 16 / 1920); font-size: calc(100vw * 18 / 1920); padding-bottom: 20px; }
</style>
