How do you implement dynamic face detection on Android? This article walks through the analysis and a working answer to that question, in the hope that readers facing the same problem will find a simpler, more practical approach.
Step 1
First, define a SurfaceView that sits on top of the SurfaceView used by the Camera; the face bounding boxes are drawn on this overlay.
import android.content.Context;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.Paint;
import android.graphics.PixelFormat;
import android.graphics.PointF;
import android.graphics.PorterDuff;
import android.graphics.PorterDuffXfermode;
import android.media.FaceDetector;
import android.util.AttributeSet;
import android.util.Log;
import android.view.SurfaceHolder;
import android.view.SurfaceView;

public class FindFaceView extends SurfaceView implements SurfaceHolder.Callback {

    private SurfaceHolder holder;
    private int mWidth;
    private int mHeight;
    private float eyesDistance;

    public FindFaceView(Context context, AttributeSet attrs) {
        super(context, attrs);
        holder = getHolder();
        holder.addCallback(this);
        // make the overlay surface transparent and keep it above the camera surface
        holder.setFormat(PixelFormat.TRANSPARENT);
        this.setZOrderOnTop(true);
    }

    @Override
    public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
        mWidth = width;
        mHeight = height;
    }

    @Override
    public void surfaceCreated(SurfaceHolder holder) {
    }

    @Override
    public void surfaceDestroyed(SurfaceHolder holder) {
    }

    public void drawRect(FaceDetector.Face[] faces, int numberOfFaceDetected) {
        Canvas canvas = holder.lockCanvas();
        if (canvas != null) {
            // wipe whatever was drawn for the previous frame
            Paint clipPaint = new Paint();
            clipPaint.setAntiAlias(true);
            clipPaint.setStyle(Paint.Style.STROKE);
            clipPaint.setXfermode(new PorterDuffXfermode(PorterDuff.Mode.CLEAR));
            canvas.drawPaint(clipPaint);
            canvas.drawColor(getResources().getColor(android.R.color.transparent));

            Paint paint = new Paint();
            paint.setAntiAlias(true);
            paint.setColor(Color.GREEN);
            paint.setStyle(Paint.Style.STROKE);
            paint.setStrokeWidth(5.0f);

            for (int i = 0; i < numberOfFaceDetected; i++) {
                FaceDetector.Face face = faces[i];
                PointF midPoint = new PointF();
                // get the midpoint between the two eyes
                face.getMidPoint(midPoint);
                // get the distance between the two eyes
                eyesDistance = face.eyesDistance();
                // scale factors between the preview image and the on-screen area
                // (500/600, like the 240 below, are tied to the author's preview
                // dimensions; note the float division, the original divided two ints)
                float scale_x = mWidth / 500f;
                float scale_y = mHeight / 600f;
                Log.e("eyesDistance=", eyesDistance + "");
                Log.e("midPoint.x=", midPoint.x + "");
                Log.e("midPoint.y=", midPoint.y + "");
                // the captured frame is a mirror image of what is displayed, so the
                // eye midpoint taken from the image is horizontally flipped compared
                // with what appears on screen
                canvas.drawRect(
                        (240 - midPoint.x - eyesDistance) * scale_x,
                        midPoint.y * scale_y,
                        (240 - midPoint.x + eyesDistance) * scale_x,
                        (midPoint.y + 3 * eyesDistance) * scale_y,
                        paint);
            }
            holder.unlockCanvasAndPost(canvas);
        }
    }
}
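The hard-coded 240, 500 and 600 above only work for the author's preview dimensions. As a hedged, more general sketch of the same mirror-and-scale mapping (the helper name mapFaceToScreen and its parameters are hypothetical, not part of the original code; it would live inside FindFaceView):

    // Hypothetical helper: map a face from preview coordinates to a mirrored,
    // scaled rectangle in overlay-view coordinates. previewWidth/previewHeight
    // are assumed to be the dimensions of the bitmap handed to FaceDetector.
    private android.graphics.RectF mapFaceToScreen(FaceDetector.Face face,
            float previewWidth, float previewHeight,
            float viewWidth, float viewHeight) {
        PointF mid = new PointF();
        face.getMidPoint(mid);
        float d = face.eyesDistance();
        float scaleX = viewWidth / previewWidth;
        float scaleY = viewHeight / previewHeight;
        // front-camera frames are mirrored, so flip x across the preview width
        return new android.graphics.RectF(
                (previewWidth - mid.x - d) * scaleX,   // left
                mid.y * scaleY,                        // top
                (previewWidth - mid.x + d) * scaleX,   // right
                (mid.y + 3 * d) * scaleY);             // bottom
    }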
Key points
1. holder = getHolder(); obtains the SurfaceHolder, and Canvas canvas = holder.lockCanvas(); binds us to the canvas on which the face regions are drawn. From there we can draw freely, provided of course that we already have the face coordinates; a minimal sketch of this drawing cycle follows.
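The whole lock/draw/post cycle boils down to this (left, top, right, bottom and paint are illustrative placeholders):

    Canvas canvas = holder.lockCanvas();           // acquire the surface's canvas
    if (canvas != null) {
        try {
            // clear last frame's rectangles, keeping the surface transparent
            canvas.drawColor(Color.TRANSPARENT, PorterDuff.Mode.CLEAR);
            canvas.drawRect(left, top, right, bottom, paint);  // the new face box
        } finally {
            holder.unlockCanvasAndPost(canvas);    // push the buffer to the screen
        }
    }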
2. One more important point: the SurfaceView we lay over the Camera must be transparent, and its layer must be set to the top of the view hierarchy.
holder.setFormat(PixelFormat.TRANSPARENT);
this.setZOrderOnTop(true);
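The article never shows R.layout.main, which the Activity below loads, but the layering it relies on can be sketched programmatically. Everything in this snippet (the FrameLayout structure, the variable names, placing it in onCreate) is an assumption, not the original layout:

    // Hedged sketch of the assumed layout: the camera preview is added first
    // (bottom of the view tree), the transparent FindFaceView last (top).
    FrameLayout root = new FrameLayout(this);
    SurfaceView cameraPreview = new SurfaceView(this);
    FindFaceView faceOverlay = new FindFaceView(this, null);
    root.addView(cameraPreview, new FrameLayout.LayoutParams(
            ViewGroup.LayoutParams.MATCH_PARENT, ViewGroup.LayoutParams.MATCH_PARENT));
    root.addView(faceOverlay, new FrameLayout.LayoutParams(
            ViewGroup.LayoutParams.MATCH_PARENT, ViewGroup.LayoutParams.MATCH_PARENT));
    setContentView(root);

In the original code the two views instead come from R.layout.main via findViewById, which achieves the same stacking in XML.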
Step 2
Now we detect the faces themselves; the precondition is that we can get hold of the preview frames.
import java.io.ByteArrayOutputStream;

import android.app.Activity;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.ImageFormat;
import android.graphics.Matrix;
import android.graphics.Paint;
import android.graphics.Rect;
import android.graphics.YuvImage;
import android.hardware.Camera;
import android.hardware.Camera.PictureCallback;
import android.hardware.Camera.PreviewCallback;
import android.media.FaceDetector;
import android.os.Bundle;
import android.view.Surface;
import android.view.SurfaceHolder;
import android.view.SurfaceView;
import android.view.View;
import android.view.View.OnClickListener;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.TextView;

public class FaceRecognitionDemoActivity extends Activity implements OnClickListener {

    private SurfaceView preview;
    private Camera camera;
    private Camera.Parameters parameters;
    private int orientionOfCamera; // mounting angle of the front camera
    private int faceNumber;        // number of faces detected
    private FaceDetector.Face[] faces;
    private FindFaceView mFindFaceView;
    private ImageView iv_photo;
    private Button bt_camera;
    TextView mTV;

    /**
     * Called when the activity is first created.
     */
    @Override
    public void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.main);
    }

    @Override
    protected void onStart() {
        super.onStart();
        iv_photo = (ImageView) findViewById(R.id.iv_photo);
        bt_camera = (Button) findViewById(R.id.bt_camera);
        mTV = (TextView) findViewById(R.id.show_count);
        bt_camera.setOnClickListener(this);
        mFindFaceView = (FindFaceView) findViewById(R.id.my_preview);
        preview = (SurfaceView) findViewById(R.id.preview);
        // set the buffer type (essential on pre-3.0 devices)
        preview.getHolder().setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);
        // set the surface resolution
        preview.getHolder().setFixedSize(176, 144);
        // keep the screen on (essential)
        preview.getHolder().setKeepScreenOn(true);
        preview.getHolder().addCallback(new SurfaceCallback());
    }

    private final class MyPictureCallback implements PictureCallback {
        @Override
        public void onPictureTaken(byte[] data, Camera camera) {
            try {
                Bitmap bitmap = BitmapFactory.decodeByteArray(data, 0, data.length);
                Matrix matrix = new Matrix();
                matrix.setRotate(-90);
                Bitmap bmp = Bitmap.createBitmap(bitmap, 0, 0,
                        bitmap.getWidth(), bitmap.getHeight(), matrix, true);
                bitmap.recycle();
                iv_photo.setImageBitmap(bmp);
                camera.startPreview();
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }

    private final class SurfaceCallback implements SurfaceHolder.Callback {
        @Override
        public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
            if (camera != null) {
                parameters = camera.getParameters();
                parameters.setPictureFormat(ImageFormat.JPEG);
                // size of the preview area
                parameters.setPreviewSize(width, height);
                // preview frames per second
                parameters.setPreviewFrameRate(20);
                // size of captured pictures
                parameters.setPictureSize(width, height);
                parameters.setJpegQuality(80);
                // apply the settings to the camera
                camera.setParameters(parameters);
            }
        }

        @Override
        public void surfaceCreated(SurfaceHolder holder) {
            Camera.CameraInfo cameraInfo = new Camera.CameraInfo();
            int cameraCount = Camera.getNumberOfCameras();
            // find and open the front camera
            for (int i = 0; i < cameraCount; i++) {
                Camera.getCameraInfo(i, cameraInfo);
                if (cameraInfo.facing == Camera.CameraInfo.CAMERA_FACING_FRONT) {
                    try {
                        camera = Camera.open(i);
                        camera.setPreviewDisplay(holder);
                        setCameraDisplayOrientation(i, camera);
                        // the crucial step: register the preview-frame callback
                        camera.setPreviewCallback(new MyPreviewCallback());
                        camera.startPreview();
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        }

        @Override
        public void surfaceDestroyed(SurfaceHolder holder) {
            // remember to release the camera, otherwise it stays locked and leaks memory
            if (camera != null) {
                camera.setPreviewCallback(null);
                camera.stopPreview();
                camera.release();
                camera = null;
            }
        }
    }

    private class MyPreviewCallback implements PreviewCallback {
        @Override
        public void onPreviewFrame(byte[] data, Camera camera) {
            // Note: the data delivered here is not an RGB image but a YUV (NV21)
            // frame, so it must be converted to a Bitmap before detection.
            // FaceDetector also only works on RGB_565 bitmaps; other configs fail.
            Camera.Size size = camera.getParameters().getPreviewSize();
            YuvImage yuvImage = new YuvImage(data, ImageFormat.NV21, size.width, size.height, null);
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            yuvImage.compressToJpeg(new Rect(0, 0, size.width, size.height), 80, baos);
            byte[] byteArray = baos.toByteArray();
            detectionFaces(byteArray);
        }
    }

    /**
     * Detect faces in a frame.
     *
     * @param data the preview frame, compressed to JPEG bytes
     */
    private void detectionFaces(byte[] data) {
        BitmapFactory.Options options = new BitmapFactory.Options();
        Bitmap bitmap1 = BitmapFactory.decodeByteArray(data, 0, data.length, options);
        int width = bitmap1.getWidth();
        int height = bitmap1.getHeight();
        Matrix matrix = new Matrix();
        Bitmap bitmap2 = null;
        FaceDetector detector = null;
        // handle every camera mounting angle so detection works reliably
        switch (orientionOfCamera) {
            case 0:
                // initialize the face detector (same pattern below); maxFaces
                // must not exceed the length of the faces array used later
                detector = new FaceDetector(width, height, 10);
                matrix.postRotate(0.0f, width / 2, height / 2);
                // create a mutable bitmap of the given size (the config must be
                // RGB_565, otherwise no faces are detected)
                bitmap2 = Bitmap.createBitmap(width, height, Bitmap.Config.RGB_565);
                break;
            case 90:
                detector = new FaceDetector(height, width, 10);
                matrix.postRotate(-270.0f, height / 2, width / 2);
                bitmap2 = Bitmap.createBitmap(height, width, Bitmap.Config.RGB_565);
                break;
            case 180:
                detector = new FaceDetector(width, height, 10);
                matrix.postRotate(-180.0f, width / 2, height / 2);
                bitmap2 = Bitmap.createBitmap(width, height, Bitmap.Config.RGB_565);
                break;
            case 270:
                detector = new FaceDetector(height, width, 10);
                matrix.postRotate(-90.0f, height / 2, width / 2);
                bitmap2 = Bitmap.createBitmap(height, width, Bitmap.Config.RGB_565);
                break;
        }
        if (detector == null || bitmap2 == null) {
            // unexpected mounting angle; nothing to detect against
            bitmap1.recycle();
            return;
        }
        // the array must hold at least as many entries as the maxFaces passed
        // to the FaceDetector constructor, otherwise findFaces throws
        faces = new FaceDetector.Face[10];
        Paint paint = new Paint();
        paint.setDither(true);
        Canvas canvas = new Canvas();
        canvas.setBitmap(bitmap2);
        canvas.setMatrix(matrix);
        // draw bitmap1 onto bitmap2 (the offsets may need adjusting in practice)
        canvas.drawBitmap(bitmap1, 0, 0, paint);
        // findFaces fills the faces array from the converted frame bitmap and
        // returns the number of faces actually found
        faceNumber = detector.findFaces(bitmap2, faces);
        mTV.setText("faceNumber: " + faceNumber);
        mTV.setTextColor(Color.RED);
        // hand the result to the overlay, which draws the detected face regions
        if (faceNumber != 0) {
            mFindFaceView.setVisibility(View.VISIBLE);
            mFindFaceView.drawRect(faces, faceNumber);
        } else {
            mFindFaceView.setVisibility(View.GONE);
        }
        bitmap2.recycle();
        bitmap1.recycle();
    }

    /**
     * Set the camera display orientation (it must be set this way, otherwise
     * no faces are detected).
     *
     * @param cameraId camera ID (0 is the back camera, 1 is the front camera)
     * @param camera   the camera object
     */
    private void setCameraDisplayOrientation(int cameraId, Camera camera) {
        Camera.CameraInfo info = new Camera.CameraInfo();
        Camera.getCameraInfo(cameraId, info);
        int rotation = getWindowManager().getDefaultDisplay().getRotation();
        int degree = 0;
        switch (rotation) {
            case Surface.ROTATION_0:
                degree = 0;
                break;
            case Surface.ROTATION_90:
                degree = 90;
                break;
            case Surface.ROTATION_180:
                degree = 180;
                break;
            case Surface.ROTATION_270:
                degree = 270;
                break;
        }
        orientionOfCamera = info.orientation;
        int result;
        if (info.facing == Camera.CameraInfo.CAMERA_FACING_FRONT) {
            result = (info.orientation + degree) % 360;
            result = (360 - result) % 360; // compensate for the front camera's mirror
        } else {
            result = (info.orientation - degree + 360) % 360;
        }
        camera.setDisplayOrientation(result);
    }

    @Override
    public void onClick(View v) {
        switch (v.getId()) {
            case R.id.bt_camera:
                if (camera != null) {
                    try {
                        camera.takePicture(null, null, new MyPictureCallback());
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                break;
        }
    }
}
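One caveat worth calling out: onPreviewFrame is delivered on the main thread, and compressing every frame to JPEG, decoding it, and running findFaces on it is expensive. A simple mitigation, shown below as a hedged sketch rather than part of the original article, is to skip most frames (the field names and the interval of 5 are illustrative; the fields belong on MyPreviewCallback):

    private int frameIndex = 0;
    private static final int DETECT_EVERY_N_FRAMES = 5; // illustrative interval

    @Override
    public void onPreviewFrame(byte[] data, Camera camera) {
        // run the expensive YUV -> JPEG -> Bitmap -> findFaces pipeline only on
        // every Nth frame so the preview stays smooth
        if (frameIndex++ % DETECT_EVERY_N_FRAMES != 0) {
            return;
        }
        Camera.Size size = camera.getParameters().getPreviewSize();
        YuvImage yuvImage = new YuvImage(data, ImageFormat.NV21, size.width, size.height, null);
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        yuvImage.compressToJpeg(new Rect(0, 0, size.width, size.height), 80, baos);
        detectionFaces(baos.toByteArray());
    }

Moving the conversion and detection onto a background thread (e.g. a HandlerThread) would help further, at the cost of synchronizing access to the overlay.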
That is all we have to share on implementing dynamic face detection in Android. We hope the above is of some help; if you still have open questions, you can follow the 億速云 industry news channel to learn more.