I have an HTML file to which I have attached a CSS stylesheet, JS code for image classification, and the HTML from the MediaPipe example (https://codepen.io/mediapipe-preview/pen/BaVZejK). I created my own .tflite model and set its path in modelAssetPath in place of the default model.
I get the following errors in the Chrome debug console:
Access to fetch at 'https://storage.googleapis.com/xxxxxxxxxx/resnet50_quantized.tflite' from origin 'null' has been blocked by CORS policy: No 'Access-Control-Allow-Origin' header is present on the requested resource. If an opaque response serves your needs, set the request's mode to 'no-cors' to fetch the resource with CORS disabled.
and
Failed to load resource: net::ERR_FAILED -- from a public object in Google Cloud Storage
and
Uncaught (in promise) TypeError: Failed to fetch
at Mh.l (tasks-vision:7:47151)
at Mh.l (tasks-vision:7:76591)
at Mh.o (tasks-vision:7:121629)
at Zo (tasks-vision:7:45744)
at async createImageClassifier (
In VSCode I get the following:
Could not read source map for https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision: Unexpected 404 response from https://cdn.jsdelivr.net/npm/@mediapipe/vision_bundle_mjs.js.map: Failed to resolve the requested file.
Uncaught TypeError TypeError: Failed to fetch
at l (cdn.jsdelivr.net/npm/@mediapipe/tasks-vision:7:47151)
at l (cdn.jsdelivr.net/npm/@mediapipe/tasks-vision:7:76591)
at o (cdn.jsdelivr.net/npm/@mediapipe/tasks-vision:7:121629)
at Zo (cdn.jsdelivr.net/npm/@mediapipe/tasks-vision:7:45744)
--- await ---
Since my JS knowledge is quite limited, I would appreciate some ideas about what might be wrong, for example the placement of the script, the module, or the href, if possible.
Here is my effort so far:
<!DOCTYPE html>
<html lang = "en">
<head>
<style>
@use "@material";
body {
font-family: roboto;
margin: 2em;
color: #3d3d3d;
--mdc-theme-primary: #007f8b;
--mdc-theme-on-primary: #f1f3f4;
}
h1 {
color: #007f8b;
}
h2 {
clear: both;
}
video {
clear: both;
display: block;
}
section {
opacity: 1;
transition: opacity 500ms ease-in-out;
}
.mdc-button.mdc-button--raised.removed {
display: none;
}
.removed {
display: none;
}
.invisible {
opacity: 0.2;
}
.videoView,
.classifyOnClick {
position: relative;
float: left;
width: 48%;
margin: 2% 1%;
cursor: pointer;
}
.videoView p,
.classifyOnClick p {
padding: 5px;
background-color: #007f8b;
color: #fff;
z-index: 2;
margin: 0;
}
.highlighter {
background: rgba(0, 255, 0, 0.25);
border: 1px dashed #fff;
z-index: 1;
position: absolute;
}
.classifyOnClick {
z-index: 0;
font-size: calc(8px + 1.2vw);
}
.classifyOnClick img {
width: 100%;
}
.webcamPredictions {
padding-top: 5px;
padding-bottom: 5px;
background-color: #007f8b;
color: #fff;
border: 1px dashed rgba(255, 255, 255, 0.7);
z-index: 2;
margin: 0;
width: 100%;
font-size: calc(8px + 1.2vw);
}
</style>
</head>
<body>
<link href = "https://unpkg.com/material-components-web@latest/dist/material-components-web.min.css" rel = "stylesheet">
<script src = "https://unpkg.com/material-components-web@latest/dist/material-components-web.min.js"></script>
<h1>Classifying images using the MediaPipe Image Classifier Task</h1>
<section id = "demos" class = "invisible">
<h2>Demo: Classify Images</h2>
<p><b>Click on an image below</b> to see its classification.</p>
<div class = "classifyOnClick">
<img src = "https://assets.codepen.io/9177687/dog_flickr_publicdomain.jpeg" width = "100%" crossorigin = "anonymous" title = "Click to get classification!" />
<p class = "classification removed">
</p>
</div>
<div class = "classifyOnClick">
<img src = "https://assets.codepen.io/9177687/cat_flickr_publicdomain.jpeg" width = "100%" crossorigin = "anonymous" title = "Click to get classification!" />
<p class = "classification removed">
</p>
</div>
<h2>Demo: Webcam continuous classification</h2>
<p>Hold some objects up close to your webcam to get real-time classification. For best results, avoid having too many objects visible to the camera.<br>Click <b>enable webcam</b> below and grant access to the webcam if prompted.</p>
<div class = "webcam">
<button id = "webcamButton" class = "mdc-button mdc-button--raised">
<span class = "mdc-button__ripple"></span>
<span class = "mdc-button__label">ENABLE WEBCAM</span>
</button>
<video id = "webcam" autoplay playsinline></video>
<p id = "webcamPredictions" class = "webcamPredictions removed"></p>
</div>
</section>
<script type = "module">
import {
ImageClassifier,
FilesetResolver
} from "https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision";
const video = document.getElementById("webcam");
const webcamPredictions = document.getElementById("webcamPredictions");
const demosSection = document.getElementById("demos");
let enableWebcamButton;
let webcamRunning = false;
const videoHeight = "360px";
const videoWidth = "480px";
const imageContainers = document.getElementsByClassName(
"classifyOnClick"
);
let runningMode = "IMAGE";
// Add click event listeners for the img elements.
for (let i = 0; i < imageContainers.length; i++) {
imageContainers[i].children[0].addEventListener("click", handleClick);
}
// Track imageClassifier object and load status.
let imageClassifier;
const createImageClassifier = async () => {
const vision = await FilesetResolver.forVisionTasks(
"https://cdn.jsdelivr.net/npm/@mediapipe/[email protected]/wasm"
);
imageClassifier = await ImageClassifier.createFromOptions(vision, {
baseOptions: {
modelAssetPath: `https://storage.googleapis.com/xxxxxxxxxxxxxxxxxx/resnet50_quantized.tflite`
},
maxResults: 7,
runningMode: runningMode,
scoreThreshold: 0.0
});
// Show demo section now model is ready to use.
demosSection.classList.remove("invisible");
};
createImageClassifier();
/**
* Demo 1: Classify images on click and display results.
*/
async function handleClick(event) {
// Do not classify if imageClassifier hasn't loaded
if (imageClassifier === undefined) {
return;
}
// if video mode is initialized, set runningMode to image
if (runningMode === "VIDEO") {
runningMode = "IMAGE";
await imageClassifier.setOptions({ runningMode: "IMAGE" });
}
// imageClassifier.classify() returns a promise which, when resolved, is a ClassificationResult object.
// Use the ClassificationResult to print out the results of the prediction.
const classificationResult = imageClassifier.classify(event.target);
// Write the predictions to a new paragraph element and add it to the DOM.
const classifications = classificationResult.classifications;
const p = event.target.parentNode.childNodes[3];
p.className = "classification";
p.innerText =
"Classificaton: " +
classifications[0].categories[0].categoryName +
"\n Confidence: " +
Math.round(parseFloat(classifications[0].categories[0].score) * 100) +
"%";
classificationResult.close();
}
/********************************************************************
// Demo 2: Continuously grab image from webcam stream and classify it.
********************************************************************/
// Check if webcam access is supported.
function hasGetUserMedia() {
return !!(navigator.mediaDevices && navigator.mediaDevices.getUserMedia);
}
// Get classification from the webcam
async function predictWebcam() {
// Do not classify if imageClassifier hasn't loaded
if (imageClassifier === undefined) {
return;
}
// if image mode is initialized, create a new classifier with video runningMode
if (runningMode === "IMAGE") {
runningMode = "VIDEO";
await imageClassifier.setOptions({ runningMode: "VIDEO" });
}
const startTimeMs = performance.now();
const classificationResult = imageClassifier.classifyForVideo(
video,
startTimeMs
);
video.style.height = videoHeight;
video.style.width = videoWidth;
webcamPredictions.style.width = videoWidth;
const classifications = classificationResult.classifications;
webcamPredictions.className = "webcamPredictions";
webcamPredictions.innerText =
"Classification: " +
classifications[0].categories[0].categoryName +
"\n Confidence: " +
Math.round(parseFloat(classifications[0].categories[0].score) * 100) +
"%";
// Call this function again to keep predicting when the browser is ready.
if (webcamRunning === true) {
window.requestAnimationFrame(predictWebcam);
}
}
// Enable the live webcam view and start classification.
async function enableCam(event) {
if (imageClassifier === undefined) {
return;
}
if (webcamRunning === true) {
webcamRunning = false;
enableWebcamButton.innerText = "ENABLE PREDICTIONS";
} else {
webcamRunning = true;
enableWebcamButton.innerText = "DISABLE PREDICTIONS";
}
// getUsermedia parameters.
const constraints = {
video: true
};
// Activate the webcam stream.
video.srcObject = await navigator.mediaDevices.getUserMedia(constraints);
video.addEventListener("loadeddata", predictWebcam);
}
// If webcam supported, add event listener to button.
if (hasGetUserMedia()) {
enableWebcamButton = document.getElementById("webcamButton");
enableWebcamButton.addEventListener("click", enableCam);
} else {
console.warn("getUserMedia() is not supported by your browser");
}
</script>
</body>
</html>
Both the default .tflite model and my custom .tflite load when opened in the Chrome address bar, so I don't think it is an object-permission issue. As you can see, the page does not load.
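(A quick way to confirm that this is a CORS issue rather than an object-permission issue is to request the model from a page context, for example the DevTools console on your page: typing the URL into the address bar is a top-level navigation and is not subject to CORS, while a fetch from the page is. A minimal sketch, with a placeholder bucket path:

// Run in the DevTools console of the page that loads the model.
// The URL below is a placeholder; substitute your own bucket path.
fetch("https://storage.googleapis.com/YOUR_BUCKET/resnet50_quantized.tflite")
  .then((response) => console.log("Fetched OK, HTTP status:", response.status))
  .catch((error) => console.error("Fetch failed (likely CORS):", error));

If this logs a fetch failure while the address-bar load works, the bucket's CORS configuration is the problem.)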
Wow! Now the images load! The only problem now is that when I click on an image, it is not classified. Thanks @noamgaash =)
@noamgaash The problem is solved; post your comment as an answer and I will accept it.
Enabling cross-origin support on your Google Cloud Storage bucket should solve the problem: the message has been blocked by CORS policy means that loading these resources from another origin (domain/port) is blocked by the browser when the Access-Control-Allow-Origin header is missing.
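As a sketch (the bucket name and the wide-open origin below are placeholders to adjust to your setup), a cors.json file like the following, applied with gsutil cors set cors.json gs://YOUR_BUCKET, allows GET requests from any origin:

[
  {
    "origin": ["*"],
    "method": ["GET"],
    "responseHeader": ["Content-Type"],
    "maxAgeSeconds": 3600
  }
]

For production you would normally restrict "origin" to the site that actually serves your page instead of "*".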
Good luck :)
Have you enabled cross-origin support on your Google Cloud Storage bucket?