Commit fdb48689 authored by Dominic Mazzoni, committed by Commit Bot

Handle captions and labels in addition to OCR

Previously the image annotation code only handled OCR results
from the server. Now that captions and labels are hooked up,
expose those image annotation strings too.

Bug: 905419
Change-Id: I88542054c49ba252d5bc13d0024054059b8f5123
Reviewed-on: https://chromium-review.googlesource.com/c/1492475
Reviewed-by: Katie Dektar <katie@chromium.org>
Commit-Queue: Dominic Mazzoni <dmazzoni@chromium.org>
Cr-Commit-Position: refs/heads/master@{#636279}
parent b27ad88f
......@@ -183,25 +183,40 @@ void AXImageAnnotator::OnImageAnnotated(
return;
}
std::vector<base::string16> contextualized_strings;
for (const auto& annotation : result->get_annotations()) {
if (annotation->type != image_annotation::mojom::AnnotationType::kOcr)
int message_id = 0;
switch (annotation->type) {
case image_annotation::mojom::AnnotationType::kOcr:
message_id = IDS_AX_IMAGE_ANNOTATION_OCR_CONTEXT;
break;
case image_annotation::mojom::AnnotationType::kCaption:
case image_annotation::mojom::AnnotationType::kLabel:
message_id = IDS_AX_IMAGE_ANNOTATION_DESCRIPTION_CONTEXT;
break;
}
// Skip unrecognized annotation types.
if (!message_id)
continue;
if (annotation->text.empty())
return;
auto contextualized_string = GetContentClient()->GetLocalizedString(
IDS_AX_IMAGE_ANNOTATION_OCR_CONTEXT,
base::UTF8ToUTF16(annotation->text));
image_annotations_.at(image.AxID())
.set_annotation(base::UTF16ToUTF8(contextualized_string));
render_accessibility_->MarkWebAXObjectDirty(image, false /* subtree */);
contextualized_strings.push_back(GetContentClient()->GetLocalizedString(
message_id, base::UTF8ToUTF16(annotation->text)));
}
if (contextualized_strings.size() == 0)
return;
}
DLOG(WARNING) << "No OCR results.";
// TODO(accessibility): join two sentences together in a more i18n-friendly
// way. Since this is intended for a screen reader, though, a period
// probably works in almost all languages.
std::string contextualized_string = base::UTF16ToUTF8(
base::JoinString(contextualized_strings, base::ASCIIToUTF16(". ")));
image_annotations_.at(image.AxID()).set_annotation(contextualized_string);
render_accessibility_->MarkWebAXObjectDirty(image, false /* subtree */);
}
} // namespace content
......@@ -63,6 +63,11 @@ namespace content {
namespace {
// Images smaller than this number, in CSS pixels, will never get annotated.
// Note that OCR works on pretty small images, so this shouldn't be too large.
const int kMinImageAnnotationWidth = 16;
const int kMinImageAnnotationHeight = 16;
void AddIntListAttributeFromWebObjects(ax::mojom::IntListAttribute attr,
const WebVector<WebAXObject>& objects,
AXContentNodeData* dst) {
......@@ -1053,6 +1058,12 @@ void BlinkAXTreeSource::AddImageAnnotations(blink::WebAXObject src,
return;
}
// Skip images that are too small to label. This also catches
// unloaded images where the size is unknown.
if (dst->relative_bounds.bounds.width() < kMinImageAnnotationWidth ||
dst->relative_bounds.bounds.height() < kMinImageAnnotationHeight)
return;
if (!image_annotator_) {
dst->SetImageAnnotationStatus(
ax::mojom::ImageAnnotationStatus::kEligibleForAnnotation);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment