4

When I use the HTML5 'getUserMedia' API to access the camera on an Android (4.0) phone, it opens the front camera, but I want to open the back camera. Sample code:

<!DOCTYPE html>
<html>
 <head>
   <meta charset="UTF-8">
   <meta name="viewport" content="width=device-width">
    <title>Html5 Mobile Camera</title>
    <script src="js/jquery.js"></script>
    <script>
       $(document).ready(init);

       function init() {
       try {
        window.URL = window.URL || window.webkitURL || window.msURL
                || window.oURL;
        navigator.getUserMedia = navigator.getUserMedia
                || navigator.webkitGetUserMedia
                || navigator.mozGetUserMedia
                || navigator.msGetUserMedia;

        navigator.getUserMedia({
            video : true
        }, successCallback, errorCallback);
    } catch (err) {
        // Try the old spec's string-argument syntax
        navigator.getUserMedia('video', successCallback, errorCallback);
    }
    $(":button").click(function() {
        slap();
    });
}
function slap() {
    var video = $("#myVideo")[0];
    var canvas = capture(video);
    $("#result").empty();
    $("#result").append(canvas);
    //alert();
    var imgData = canvas.toDataURL('image/png');
    // Strip the "data:image/png;base64," prefix (22 characters) so only the base64 payload remains
    imgData = imgData.substring(22);
    //blb = dataURItoBlob(imgData);
    //sendMsg(blb);
}
function errorCallback(err) {
    console.log('getUserMedia error: ', err);
}
function successCallback(stream) {
    $("#myVideo").attr("src", window.webkitURL.createObjectURL(stream));
}
function capture(video) {
    var canvas = document.createElement('canvas');
    var width = video.videoWidth;
    var height = video.videoHeight;
    canvas.width = width;
    canvas.height = height;
    var context = canvas.getContext('2d');
    // Draw the current video frame at its full size instead of a hard-coded 160x120
    context.drawImage(video, 0, 0, width, height);
    return canvas;
}

  </script>
</head>
<body>
    <video id="myVideo" autoplay="autoplay"></video>
    <br> <input type="button" value="capture" />
<br><div id="result" style="width: 145px"></div>
<div>
<p id="resultMsg" style="color: red"></p>
<p id="decodeTime" style="color: green"></p>
</div>

</body>
</html>

I don't know how to access a specific camera on an Android phone. Does anyone know how? Thanks.


3 Answers

1

You can now specify the camera in the latest spec using the facingMode constraint: http://www.w3.org/TR/mediacapture-streams/#idl-def-VideoFacingModeEnum

This property is an optional part of the MediaStreamConstraints object, which is the first argument to the getUserMedia method.

Here is a simplified example from the spec:

var supports = navigator.mediaDevices.getSupportedConstraints();
if (!supports["facingMode"]) {
  // Handle lack of browser support if necessary
}
var gotten = navigator.mediaDevices.getUserMedia({
  video: {
    facingMode: {exact: "environment"}
  }
});

The value environment refers to the device's rear-facing camera. The other possible values are user, left, and right.

Note that support for this varies across browsers and browser versions.
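
As a rough sketch of how the promise returned above might be consumed (the video element id and the fallback step are my own assumptions, not part of the spec example), you could attach the stream to a video tag and retry without the exact requirement when no rear camera is available:

// Minimal sketch: request the rear camera, fall back to any camera if the
// exact constraint cannot be satisfied. Assumes a <video id="myVideo"> element.
function openRearCamera() {
  return navigator.mediaDevices.getUserMedia({
    video: { facingMode: { exact: "environment" } }
  }).catch(function (err) {
    // "exact" makes the constraint mandatory; if no rear camera exists the
    // promise rejects (typically with OverconstrainedError), so retry loosely.
    console.log("Rear camera not available, falling back:", err);
    return navigator.mediaDevices.getUserMedia({
      video: { facingMode: "environment" }
    });
  });
}

openRearCamera().then(function (stream) {
  document.getElementById("myVideo").srcObject = stream;
}).catch(function (err) {
  console.log("getUserMedia failed:", err);
});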

Answered 2015-07-27T03:42:55.420
0

Hi, I think this will work for you:

<script>
var gum = mode => 
  navigator.mediaDevices.getUserMedia({video: {facingMode: {exact: mode}}})
  .then(stream => (video.srcObject = stream))
  .catch(e => log(e));

var stop = () => video.srcObject && video.srcObject.getTracks().forEach(t => t.stop());

var log = msg => div.innerHTML += msg + "<br>";

</script>
<button onclick="stop();gum('user')">Front</button>
<button onclick="stop();gum('environment')">Back</button>
<div id="div"></div><br>
<video id="video" height="320" autoplay></video>
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>

More about facingMode:

https://github.com/webrtcHacks/adapter/issues/820 https://developer.mozilla.org/en-US/docs/Web/API/MediaTrackConstraints/faceMode
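
If you want to verify which camera the browser actually opened, one small sketch (assuming a browser that implements MediaStreamTrack.getSettings()) is to read the settings of the resulting video track:

// Sketch: check the facingMode the browser actually applied to the track.
navigator.mediaDevices.getUserMedia({ video: { facingMode: "environment" } })
  .then(function (stream) {
    var track = stream.getVideoTracks()[0];
    // getSettings() reports the values in effect; facingMode may be undefined
    // on browsers or devices that do not expose it.
    console.log("facingMode in use:", track.getSettings().facingMode);
  })
  .catch(function (err) { console.log(err); });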

Answered 2019-02-01T11:51:16.097
0

See the gotSources(sourceInfos) function in the code below.

<!--
Based on Motion Detector Demo Created by Ákos Nikházy. 
If you use this app please link this demo http://motion-detector.nikhazy-dizajn.hu/
-->

<!DOCTYPE html>
<html>
<head>
<meta charset=utf-8 />
<title>Frame capture demo</title>

</head>
<body>
    <header>
        <h1>Motion Detection</h1>
        <h4>with HTML5 API using .getUserMedia()</h4>
    </header>

    <video autoplay></video>
    <hr>
    <canvas id="savePhoto"></canvas>
    <script>




    function hasGetUserMedia() {
        //returns true if supported
        return !!(navigator.getUserMedia || navigator.webkitGetUserMedia
                || navigator.mozGetUserMedia || navigator.msGetUserMedia);
    }

    function onSuccess(stream) {
        //If we can stream from camera.
        var source;

        //Get the stream. This goes to the video tag
        if (window.URL) {
            source = window.URL.createObjectURL(stream);
        } else if (window.webkitURL) {
            source = window.webkitURL.createObjectURL(stream);
        } else {
            source = stream; // Opera and Firefox
        }

        //Set up video tag
        video.autoplay = true;
        video.src = source;

        //We sample a frame for motion every 'sampling' milliseconds
        setInterval(function() {
            motionDetector();
        }, sampling);

    }

    function onError() {
        //if we fail (not supported, no camera etc.)
        alert('No stream, no win. Refresh.');
    }

    function saveImage(canvasToSave) {
        //create image from canvas
        dataUrl = canvasToSave.toDataURL();
        imageFound = document.createElement('img');
        imageFound.src = dataUrl;

        document.body.appendChild(imageFound);
    }

    function motionDetector() {
        ctxSave.drawImage(video, 0, 0, savePhoto.width, savePhoto.height);
    }

    /*After all those functions lets start setting up the program*/

    //Set up elements. Should be an init() but I don't care right now
    var video = document.querySelector('video'); //the video tag
    var savePhoto = document.getElementById('savePhoto'); //the possible saved image's canvas

    var ctxSave = savePhoto.getContext('2d'); //the latest image from video in full size and color

    var sampling = 1000; //how much time needed between samples in milliseconds

    var videoSourceInfo = null;

    //We need this so we can use the videoWidth and ...Height, also we setup canvas sizes here, after we have video data
    video.addEventListener("loadedmetadata", function() {
        console.log(video.videoWidth + ":" + video.videoHeight)
        savePhoto.width = video.videoWidth;
        savePhoto.height = video.videoHeight;
    });




    function start() {      //Start the whole magic
        if (hasGetUserMedia()) {


            // Normalize vendor-prefixed getUserMedia if the standard name is missing
            navigator.getUserMedia
                    || (navigator.getUserMedia = navigator.mozGetUserMedia
                            || navigator.webkitGetUserMedia
                            || navigator.msGetUserMedia);


            var videoSourceInfoId = videoSourceInfo.id;
            var constraints = {
                video : {
                    optional: [{sourceId: videoSourceInfoId}]
                },
                toString : function() {
                    return "video";
                }
            };


            navigator.getUserMedia(constraints, onSuccess, onError);
        } else {
            //no support
            alert('getUserMedia() is not supported in your browser. Try Chrome.');
        }
    }

    function gotSources(sourceInfos) {
        for (var i = sourceInfos.length-1 ; i >= 0; i--) { // iterate from the last source, which is usually (but not guaranteed to be) the back camera
            var sourceInfo = sourceInfos[i];
            if (sourceInfo.kind === 'video') {
                videoSourceInfo = sourceInfo;
                console.log('SourceId: ', videoSourceInfo.id);
                start();
                break;
            } else {
                console.log('Some other kind of source: ', sourceInfo);
            }
        }
    }

    if (typeof MediaStreamTrack === 'undefined') {
        alert('This browser does not support MediaStreamTrack.\n\nTry Chrome Canary.');
    } else {
        MediaStreamTrack.getSources(gotSources);  // async task
    }


</script>

</body>
</html>
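
Note that MediaStreamTrack.getSources() used above has since been removed from browsers. A rough modern sketch of the same idea, keeping this answer's guess that the last video input is the back camera, would use navigator.mediaDevices.enumerateDevices() instead:

// Sketch: pick the last video input device (often, but not always, the back
// camera) using enumerateDevices() instead of the removed getSources().
navigator.mediaDevices.enumerateDevices().then(function (devices) {
  var videoInputs = devices.filter(function (d) { return d.kind === 'videoinput'; });
  if (videoInputs.length === 0) {
    alert('No camera found.');
    return;
  }
  var chosen = videoInputs[videoInputs.length - 1];
  return navigator.mediaDevices.getUserMedia({
    video: { deviceId: { exact: chosen.deviceId } }
  }).then(function (stream) {
    document.querySelector('video').srcObject = stream;
  });
}).catch(function (err) {
  console.log(err);
});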
Answered 2015-11-04T15:56:37.767