-
zhangwei
2024-08-21 9efb46fe04b3bb9098e92979ae2c658256446f25
src/pages/mine/apply.vue
@@ -39,10 +39,18 @@
            <up-form-item label="简历描述" required prop="userInfo.name" borderBottom ref="item1">
               <up-textarea v-model="state.model1.userInfo.name" placeholder="请输入简历描述"></up-textarea>
            </up-form-item>
            <cameraBufferJpg/>
            <view class="text-area">
               <camera device-position="front" style="width: 100%; height: 600rpx;"></camera>
            </view>
            <view class="tabbtns">
               <up-button color='#fece01' class="text-69" text="人脸识别" @click='IaiAddPerso'></up-button>
            </view>
            <!-- <up-popup :show="show" @close="close" @open="open"> -->
            <!-- <tem-session/> -->
            <!-- </up-popup> -->
            <!-- <view class="tabbtns">
               <up-button color='#fece01' class="text-69" text="保存"></up-button>
            </view> -->
@@ -53,6 +61,11 @@
</template>
<script setup>
	import {
		onLoad,
		onShow,
		onUnload // used by the frame-listener cleanup sketch below
	} from "@dcloudio/uni-app";
   import {
      apiBaseUrl
   } from '@/common/setting/constVarsHelper.js';
@@ -67,6 +80,21 @@
      ref,
      reactive
   } from 'vue';
   let webgl = ref()
	// Holds the camera frame listener so it can be stopped when the page unloads
	let listener = null
	onLoad(() => {
		const cameraEngine = wx.createCameraContext()
		// 3. Get the camera's real-time frame data
		listener = cameraEngine.onCameraFrame((frame) => {
			// Only analyze frames while face recognition is toggled on
			if (!status.value) {
				return
			}
			jiance(frame)
		})
		// 5. Start listening for frame data
		listener.start()
	})
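	// Hedged sketch (not in the original commit): stop the frame listener when the page
	// unloads so the camera callback stops firing; uses the onUnload hook imported above.
	onUnload(() => {
		if (listener) listener.stop() // CameraFrameListener.stop() ends frame delivery
	})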
	let status = ref(false) // whether camera frames are currently fed to face detection
   let radiolist1 = [{
         name: '男',
      },
@@ -105,33 +133,59 @@
	// Create a reactive reference with ref
   const formRef = ref(null);
	const session = wx.createVKSession({
		track: {
			face: { mode: 2 } // mode: 1 - use the camera; 2 - pass images in manually
		},
		version: 'v1',
	})
	// Assumed canvas size for getVKFrame; replace with the real drawing-canvas dimensions
	const canvasWidth = 480
	const canvasHeight = 640
	// Per-frame analysis
	const onFrame = timestamp => {
		// Developers can control the frame rate themselves here
		const frame = session.getVKFrame(canvasWidth, canvasHeight)
		if (frame) {
			// Analysis is done; the frame object is available
			doRender(frame)
		}
		session.requestAnimationFrame(onFrame)
	}
	session.start(err => {
		if (!err) session.requestAnimationFrame(onFrame)
	})

	// Render function (not yet implemented)
	const doRender = frame => {
		// ...
	}
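	// Hedged sketch (not in the original commit): one way to "control the frame rate
	// yourself", as the comment in onFrame suggests, is to skip frames by timestamp.
	let lastAnalyzed = 0
	const shouldAnalyze = (timestamp, minIntervalMs = 100) => {
		if (timestamp - lastAnalyzed < minIntervalMs) return false
		lastAnalyzed = timestamp
		return true // at most ~10 analyses per second with the default interval
	}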
	const IaiAddPerso = (e) => {
		// Toggle frame analysis. While status is true, the onCameraFrame listener feeds each
		// frame to jiance. The onFrame loop is already driven by session.start above, so it
		// must not be invoked again here (that would stack duplicate rAF loops).
		status.value = !status.value
		// wx.startFacialRecognitionVerify()
		// const session = wx.createVKSession({
		//    track: {
		//       face: {
		//          mode: 1
		//       } // mode: 1 - use the camera; 2 - pass images in manually
		//    },
		// })
		// // In static-image detection mode, every detectFace call fires one updateAnchors event
		// session.on('updateAnchors', anchors => {
		//    anchors.forEach(anchor => {
		//       console.log('anchor.points', anchor.points)
		//       console.log('anchor.origin', anchor.origin)
		//       console.log('anchor.size', anchor.size)
		//       console.log('anchor.angle', anchor.angle)
		//    })
		// })
		// // start must be called once to launch the session
		// session.start(errno => {
		//    if (errno) {
		//       console.log(errno)
		//       // On failure, errno is returned
		//    } else {
		//       // Otherwise null is returned, meaning success
		//       session.detectFace({
		//          frameBuffer, // image ArrayBuffer: face pixel data, four items (RGBA) per pixel
		//          width, // image width
		//          height, // image height
		//          scoreThreshold: 0.5, // score threshold
		//          sourceType: 1,
		//          modelMode: 1,
		//       })
		//    }
		// })
		// wx.chooseMedia({
		//    count: 1,
		//    sourceType: ['camera'],
		//    mediaType: ['image'],
		//    maxDuration: 60,
		//    camera: 'front',
		//    success(res) {
		//       for (let i = 0; i < res.tempFiles.length; i++) {
		//          console.log("===111===" + res.tempFiles[i].tempFilePath)
		//          console.log("===222===" + res.tempFiles[i].size)
		//       }
		//    }
		// })
		// $api.IaiAddPerso({}, userInfo).then(res => {
		//    console.log(res, 'face recognition');
		// })
@@ -216,6 +270,59 @@
		console.log(e, 'delete')
   }
	// Lazily-created detection session: a VKSession should be created and started once and
	// then reused across frames rather than recreated on every camera frame.
	let detectSession = null
	let detectSessionReady = false
	const jiance = (imgbuffer) => {
		if (!detectSession) {
			detectSession = wx.createVKSession({
				track: {
					face: {
						mode: 2
					} // mode: 1 - use the camera; 2 - pass images in manually
				},
				version: 'v1'
			})
			// In manual-input mode, every detectFace call fires one updateAnchors event
			detectSession.on('updateAnchors', anchors => {
				console.log('face detected', anchors)
				anchors.forEach(anchor => {
					console.log('anchor.points', anchor.points)
					console.log('anchor.origin', anchor.origin)
					console.log('anchor.size', anchor.size)
					console.log('anchor.angle', anchor.angle)
				})
			})
			detectSession.on('removeAnchors', () => {
				console.log('face lost', 'removeAnchors')
			})
			detectSession.start(errno => {
				if (errno) {
					// On failure, errno is returned
					console.log('VKSession start failed', errno)
				} else {
					// Otherwise null is returned, meaning success
					detectSessionReady = true
				}
			})
		}
		if (!detectSessionReady) {
			return
		}
		detectSession.detectFace({
			frameBuffer: imgbuffer.data, // image ArrayBuffer: face pixel data, four items (RGBA) per pixel
			width: imgbuffer.width, // image width
			height: imgbuffer.height, // image height
			scoreThreshold: 0.8, // score threshold
			sourceType: 1,
			modelMode: 0,
		})
	}
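	// Hedged sketch (not in the original commit; assumes anchor.origin is the box's top-left
	// corner and anchor.size its extent): derive a face bounding-box centre from the fields
	// logged in the updateAnchors handler above.
	const anchorCenter = (anchor) => ({
		x: anchor.origin.x + anchor.size.width / 2,
		y: anchor.origin.y + anchor.size.height / 2,
	})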
</script>
<style>