# Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: lens_overlay_client_context.proto, lens_overlay_client_logs.proto, lens_overlay_client_platform.proto, lens_overlay_cluster_info.proto, lens_overlay_deep_gleam_data.proto, lens_overlay_document.proto, lens_overlay_filters.proto, lens_overlay_geometry.proto, lens_overlay_image_crop.proto, lens_overlay_image_data.proto, lens_overlay_interaction_request_metadata.proto, lens_overlay_knowledge_intent_query.proto, lens_overlay_knowledge_query.proto, lens_overlay_math_solver_query.proto, lens_overlay_message_set.proto, lens_overlay_overlay_object.proto, lens_overlay_payload.proto, lens_overlay_phase_latencies_metadata.proto, lens_overlay_platform.proto, lens_overlay_polygon.proto, lens_overlay_request_id.proto, lens_overlay_routing_info.proto, lens_overlay_selection_type.proto, lens_overlay_server.proto, lens_overlay_service_deps.proto, lens_overlay_stickiness_signals.proto, lens_overlay_surface.proto, lens_overlay_text.proto, lens_overlay_text_query.proto, lens_overlay_translate_stickiness_signals.proto, lens_overlay_video_context_input_params.proto, lens_overlay_video_params.proto, lens_overlay_visual_search_interaction_data.proto, lens_overlay_visual_search_interaction_log_data.proto
# plugin: python-betterproto
# This file has been @generated

from dataclasses import dataclass
from typing import (
    List,
    Optional,
)

import betterproto


class LensOverlayFilterType(betterproto.Enum):
    """Supported filter types."""

    UNKNOWN_FILTER_TYPE = 0
    TRANSLATE = 2
    AUTO_FILTER = 7


class Platform(betterproto.Enum):
    UNSPECIFIED = 0
    WEB = 3


class Surface(betterproto.Enum):
    UNSPECIFIED = 0
    CHROMIUM = 4


class LensRenderingEnvironment(betterproto.Enum):
    """The possible rendering environments."""

    RENDERING_ENV_UNSPECIFIED = 0
    RENDERING_ENV_LENS_OVERLAY = 14


class LensOverlayPhaseLatenciesMetadataImageType(betterproto.Enum):
    UNKNOWN = 0
    JPEG = 1
    PNG = 2
    WEBP = 3


class LensOverlayClientLogsLensOverlayEntryPoint(betterproto.Enum):
    UNKNOWN_ENTRY_POINT = 0
    APP_MENU = 1
    PAGE_CONTEXT_MENU = 2
    IMAGE_CONTEXT_MENU = 3
    OMNIBOX_BUTTON = 4
    TOOLBAR_BUTTON = 5
    FIND_IN_PAGE = 6


class ClientPlatform(betterproto.Enum):
    UNSPECIFIED = 0
    LENS_OVERLAY = 2


class CoordinateType(betterproto.Enum):
    """Specifies the coordinate system used for geometry protos."""

    UNSPECIFIED = 0
    """Unspecified default value, per proto best practice."""

    NORMALIZED = 1
    """Normalized coordinates."""

    IMAGE = 2
    """Image pixel coordinates."""


class PolygonVertexOrdering(betterproto.Enum):
    """Specifies the vertex ordering."""

    VERTEX_ORDERING_UNSPECIFIED = 0
    CLOCKWISE = 1
    COUNTER_CLOCKWISE = 2


class WritingDirection(betterproto.Enum):
    """The text reading order."""

    LEFT_TO_RIGHT = 0
    RIGHT_TO_LEFT = 1
    TOP_TO_BOTTOM = 2


class Alignment(betterproto.Enum):
    """The text alignment."""

    DEFAULT_LEFT_ALIGNED = 0
    RIGHT_ALIGNED = 1
    CENTER_ALIGNED = 2


class TextLayoutWordType(betterproto.Enum):
    TEXT = 0
    """Printed text."""

    FORMULA = 1
    """Formula type, including mathematical or chemical formulas."""


class TranslationDataStatusCode(betterproto.Enum):
    UNKNOWN = 0
    SUCCESS = 1
    SERVER_ERROR = 2
    UNSUPPORTED_LANGUAGE_PAIR = 3
    SAME_LANGUAGE = 4
    UNKNOWN_SOURCE_LANGUAGE = 5
    INVALID_REQUEST = 6
    DEADLINE_EXCEEDED = 7
    EMPTY_TRANSLATION = 8
    NO_OP_TRANSLATION = 9


class TranslationDataBackgroundImageDataFileFormat(betterproto.Enum):
    """File format of the bytes in background_image."""

    UNKNOWN = 0
    RAW_BYTES_RGBA = 1
    PNG_RGBA = 2
    WEBP_RGBA = 3
    JPEG_RGB_PNG_MASK = 4


class LensOverlayInteractionRequestMetadataType(betterproto.Enum):
    UNKNOWN = 0
    TAP = 1
    """User's tap on the screen."""

    REGION = 2
    """User's region selection on the screenshot."""

    TEXT_SELECTION = 3
    """User's text selection on the screenshot."""

    REGION_SEARCH = 4
    """User selected a bounding box for region search."""

    OBJECT_FULFILLMENT = 5
    """Requests selection and fulfillment of a specific object."""

    CONTEXTUAL_SEARCH_QUERY = 9
    """User sent a query in the contextual search box."""

    PDF_QUERY = 10
    """User sent a query about a PDF."""

    WEBPAGE_QUERY = 11
    """User sent a query about a website."""


class OverlayObjectRenderingMetadataRenderType(betterproto.Enum):
    DEFAULT = 0
    GLEAM = 1


class LensOverlaySelectionType(betterproto.Enum):
    """Possible selection types for the Lens overlay."""

    UNKNOWN_SELECTION_TYPE = 0
    TAP_ON_EMPTY = 1
    SELECT_TEXT_HIGHLIGHT = 3
    REGION_SEARCH = 7
    INJECTED_IMAGE = 10
    TAP_ON_REGION_GLEAM = 15
    MULTIMODAL_SEARCH = 18
    SELECT_TRANSLATED_TEXT = 21
    TAP_ON_OBJECT = 22
    MULTIMODAL_SUGGEST_TYPEAHEAD = 25
    MULTIMODAL_SUGGEST_ZERO_PREFIX = 26
    TRANSLATE_CHIP = 52
    SYMBOLIC_MATH_OBJECT = 53


class PayloadRequestType(betterproto.Enum):
    """The type of the request the payload is sent in."""

    REQUEST_TYPE_DEFAULT = 0
    """Unset request type."""

    REQUEST_TYPE_PDF = 1
    """Request is for a PDF."""

    REQUEST_TYPE_EARLY_PARTIAL_PDF = 3
    """Request is for a partial PDF upload."""

    REQUEST_TYPE_WEBPAGE = 2
    """Request is for a webpage."""


class PayloadCompressionType(betterproto.Enum):
    """Possible compression types for content_data."""

    UNCOMPRESSED = 0
    """Default value. File is not compressed."""

    ZSTD = 1
    """ZSTD compression."""


class LensOverlayServerErrorErrorType(betterproto.Enum):
    UNKNOWN_TYPE = 0
    MISSING_REQUEST = 1


class StickinessSignalsNamespace(betterproto.Enum):
    UNKNOWN = 0
    TRANSLATE_LITE = 56
    EDUCATION_INPUT = 79


@dataclass(eq=False, repr=False)
class AppliedFilter(betterproto.Message):
    """Supported filter types."""

    filter_type: "LensOverlayFilterType" = betterproto.enum_field(1)
    translate: "AppliedFilterTranslate" = betterproto.message_field(
        3, group="filter_payload"
    )


@dataclass(eq=False, repr=False)
class AppliedFilterTranslate(betterproto.Message):
    target_language: str = betterproto.string_field(1)
    source_language: str = betterproto.string_field(2)


@dataclass(eq=False, repr=False)
class AppliedFilters(betterproto.Message):
    """Supported filter types."""

    filter: List["AppliedFilter"] = betterproto.message_field(1)


@dataclass(eq=False, repr=False)
class LensOverlayClientContext(betterproto.Message):
    """Context information of the client sending the request."""

    platform: "Platform" = betterproto.enum_field(1)
    """Required. Client platform."""

    surface: "Surface" = betterproto.enum_field(2)
    """Optional. Client surface."""

    locale_context: "LocaleContext" = betterproto.message_field(4)
    """Required. Locale specific context."""

    app_id: str = betterproto.string_field(6)
    """
    Required. Name of the package which sends the request to Lens Frontend.
    """

    client_filters: "AppliedFilters" = betterproto.message_field(17)
    """Filters that are enabled on the client side."""

    rendering_context: "RenderingContext" = betterproto.message_field(20)
    """The rendering context info."""

    client_logging_data: "ClientLoggingData" = betterproto.message_field(23)
    """Logging data."""


@dataclass(eq=False, repr=False)
class LocaleContext(betterproto.Message):
    """Describes locale context."""

    language: str = betterproto.string_field(1)
    """The BCP 47 language tag used to identify the language of the client."""

    region: str = betterproto.string_field(2)
    """The CLDR region tag used to identify the region of the client."""

    time_zone: str = betterproto.string_field(3)
    """The CLDR time zone ID used to identify the timezone of the client."""
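

# Illustrative sketch: how a client context could be assembled from the
# messages above. The language/region/time zone/app id values below are
# placeholders chosen for the example, not defaults taken from the protos.
def _example_client_context() -> "LensOverlayClientContext":
    return LensOverlayClientContext(
        platform=Platform.WEB,
        surface=Surface.CHROMIUM,
        locale_context=LocaleContext(
            language="en-US",
            region="US",
            time_zone="America/Los_Angeles",
        ),
        app_id="example.app",  # placeholder package name
        client_filters=AppliedFilters(
            filter=[AppliedFilter(filter_type=LensOverlayFilterType.AUTO_FILTER)]
        ),
    )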


@dataclass(eq=False, repr=False)
class RenderingContext(betterproto.Message):
    rendering_environment: "LensRenderingEnvironment" = betterproto.enum_field(2)
    """The rendering environment."""


@dataclass(eq=False, repr=False)
class ClientLoggingData(betterproto.Message):
    """Contains data that can be used for logging purposes."""

    is_history_eligible: bool = betterproto.bool_field(1)
    """Whether history is enabled."""


@dataclass(eq=False, repr=False)
class LensOverlayPhaseLatenciesMetadata(betterproto.Message):
    """Phase latency metadata for the Lens Overlay."""

    phase: List["LensOverlayPhaseLatenciesMetadataPhase"] = betterproto.message_field(1)


@dataclass(eq=False, repr=False)
class LensOverlayPhaseLatenciesMetadataPhase(betterproto.Message):
    """
    Represents a single point in time during the image preprocessing flow.
    """

    image_downscale_data: "LensOverlayPhaseLatenciesMetadataPhaseImageDownscaleData" = (
        betterproto.message_field(3, group="phase_data")
    )
    """Data relevant only to the IMAGE_DOWNSCALE_END PhaseType."""

    image_encode_data: "LensOverlayPhaseLatenciesMetadataPhaseImageEncodeData" = (
        betterproto.message_field(4, group="phase_data")
    )
    """Data relevant only to the IMAGE_ENCODE_END PhaseType."""


@dataclass(eq=False, repr=False)
class LensOverlayPhaseLatenciesMetadataPhaseImageDownscaleData(betterproto.Message):
    original_image_size: int = betterproto.int64_field(1)
    """The size of the original image, in pixels."""

    downscaled_image_size: int = betterproto.int64_field(2)
    """The size of the downscaled image, in pixels."""


@dataclass(eq=False, repr=False)
class LensOverlayPhaseLatenciesMetadataPhaseImageEncodeData(betterproto.Message):
    original_image_type: "LensOverlayPhaseLatenciesMetadataImageType" = (
        betterproto.enum_field(1)
    )
    """
    The type of the original image. This only applies to IMAGE_ENCODE_END
    PhaseTypes.
    """

    encoded_image_size_bytes: int = betterproto.int64_field(2)
    """The size of the encoded image, in bytes."""


@dataclass(eq=False, repr=False)
class LensOverlayClientLogs(betterproto.Message):
    phase_latencies_metadata: "LensOverlayPhaseLatenciesMetadata" = (
        betterproto.message_field(1)
    )
    """
    The phase latency metadata for any image preprocessing required for the
    request.
    """

    lens_overlay_entry_point: "LensOverlayClientLogsLensOverlayEntryPoint" = (
        betterproto.enum_field(2)
    )
    """The Lens Overlay entry point used to access Lens."""

    paella_id: int = betterproto.uint64_field(3)
    """
    A unique identifier for associating events logged by Lens asynchronously.
    """

    metrics_collection_disabled: bool = betterproto.bool_field(5)
    """Whether the user has disabled metrics collection."""


@dataclass(eq=False, repr=False)
class LensOverlayRoutingInfo(betterproto.Message):
    """Information about where to route the request."""

    server_address: str = betterproto.string_field(1)
    """Address to route the request to."""

    cell_address: str = betterproto.string_field(3)
    """Cell to route the request to."""

    blade_target: str = betterproto.string_field(2)
    """Blade target to route the request to."""


@dataclass(eq=False, repr=False)
class LensOverlayClusterInfo(betterproto.Message):
    """The cluster info for a Lens Overlay session."""

    server_session_id: str = betterproto.string_field(1)
    """ID for subsequent server requests."""

    search_session_id: str = betterproto.string_field(2)
    """ID for subsequent search requests."""

    routing_info: "LensOverlayRoutingInfo" = betterproto.message_field(6)
    """Info used for routing subsequent requests."""


@dataclass(eq=False, repr=False)
class Polygon(betterproto.Message):
    """Information about a polygon."""

    vertex: List["PolygonVertex"] = betterproto.message_field(1)
    vertex_ordering: "PolygonVertexOrdering" = betterproto.enum_field(2)
    coordinate_type: "CoordinateType" = betterproto.enum_field(3)
    """Specifies the coordinate type of vertices."""


@dataclass(eq=False, repr=False)
class PolygonVertex(betterproto.Message):
    """Represents a single vertex in the polygon."""

    x: float = betterproto.float_field(1)
    y: float = betterproto.float_field(2)


@dataclass(eq=False, repr=False)
class CenterRotatedBox(betterproto.Message):
    """Information about a bounding box rotated around its center."""

    center_x: float = betterproto.float_field(1)
    center_y: float = betterproto.float_field(2)
    width: float = betterproto.float_field(3)
    height: float = betterproto.float_field(4)
    rotation_z: float = betterproto.float_field(5)
    """
    Clockwise rotation around the center in radians. The rotation angle is
    computed before normalizing the coordinates.
    """

    coordinate_type: "CoordinateType" = betterproto.enum_field(6)
    """
    Specifies the coordinate type of center and size.
    @note default is COORDINATE_TYPE_UNSPECIFIED, please initialize this value
    to NORMALIZED or IMAGE for Lens detection API usage.
    """


@dataclass(eq=False, repr=False)
class Geometry(betterproto.Message):
    """Geometric shape(s) used for tracking and detection."""

    bounding_box: "CenterRotatedBox" = betterproto.message_field(1)
    """Specifies the bounding box for this geometry."""

    segmentation_polygon: List["Polygon"] = betterproto.message_field(5)
    """
    Specifies the segmentation polygon. The vertices of outer boundaries are
    in clockwise ordering, and those of inner boundaries are in
    counter-clockwise ordering.
    """
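

# Illustrative sketch: building a Geometry for a normalized region. The
# CenterRotatedBox comment above asks callers to set coordinate_type
# explicitly; the numbers here are arbitrary example values.
def _example_region_geometry() -> "Geometry":
    box = CenterRotatedBox(
        center_x=0.5,
        center_y=0.5,
        width=0.25,
        height=0.10,
        rotation_z=0.0,
        coordinate_type=CoordinateType.NORMALIZED,
    )
    return Geometry(bounding_box=box)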


@dataclass(eq=False, repr=False)
class ZoomedCrop(betterproto.Message):
    """
    A cropped and potentially re-scaled image region: a rectangular subregion
    of a canonical image.
    """

    crop: "CenterRotatedBox" = betterproto.message_field(1)
    """The cropped region of the parent image in parent coordinates."""

    parent_width: int = betterproto.int32_field(2)
    """Width of the parent image."""

    parent_height: int = betterproto.int32_field(3)
    """Height of the parent image."""

    zoom: float = betterproto.float_field(4)
    """
    The ratio of the pixel dimensions of the child image to the pixel
    dimensions of the 'crop' in parent coordinates.
    """


@dataclass(eq=False, repr=False)
class Text(betterproto.Message):
    text_layout: "TextLayout" = betterproto.message_field(1)
    """Optional. Information describing the text."""

    content_language: str = betterproto.string_field(2)
    """
    Optional. Dominant content language of the text. Language
    code is CLDR/BCP-47.
    """


@dataclass(eq=False, repr=False)
class TextLayout(betterproto.Message):
    """Nested text structure."""

    paragraphs: List["TextLayoutParagraph"] = betterproto.message_field(1)
    """Optional. List of paragraphs in natural reading order."""


@dataclass(eq=False, repr=False)
class TextLayoutWord(betterproto.Message):
    id: "TextEntityIdentifier" = betterproto.message_field(1)
    """Required. Unique id within TextLayout."""

    plain_text: str = betterproto.string_field(2)
    """Optional. The word in plain text."""

    text_separator: Optional[str] = betterproto.string_field(3, optional=True)
    """
    Optional. The text separator that should be appended after this word when
    it is concatenated with the subsequent word in the same or next
    line/paragraph into a single-line string. This is specified as optional
    because there is a distinction between the absence of a separator and
    the empty string as a separator.
    """

    geometry: "Geometry" = betterproto.message_field(4)
    """Optional. The geometry of the word."""

    type: "TextLayoutWordType" = betterproto.enum_field(5)
    """Optional. The type of this word."""

    formula_metadata: "TextLayoutWordFormulaMetadata" = betterproto.message_field(6)
    """
    Optional. Metadata for formulas. This is populated for entities of
    `type=FORMULA`.
    """


@dataclass(eq=False, repr=False)
class TextLayoutWordFormulaMetadata(betterproto.Message):
    latex: str = betterproto.string_field(1)
    """
    Optional. LaTeX representation of a formula. Can be the same as
    `plain_text`. Example: "\\frac{2}{x}=y". The plain text
    representation of this is available in Word.plain_text.
    """


@dataclass(eq=False, repr=False)
class TextLayoutLine(betterproto.Message):
    words: List["TextLayoutWord"] = betterproto.message_field(1)
    """Optional. List of words in natural reading order."""

    geometry: "Geometry" = betterproto.message_field(2)
    """Optional. The geometry of the line."""


@dataclass(eq=False, repr=False)
class TextLayoutParagraph(betterproto.Message):
    id: "TextEntityIdentifier" = betterproto.message_field(1)
    """Required. Unique id within TextLayout."""

    lines: List["TextLayoutLine"] = betterproto.message_field(2)
    """
    Optional. List of lines in natural reading order (see also
    `writing_direction`).
    """

    geometry: "Geometry" = betterproto.message_field(3)
    """Optional. Geometry of the paragraph."""

    writing_direction: "WritingDirection" = betterproto.enum_field(4)
    """Optional. The text writing direction (aka reading order)."""

    content_language: str = betterproto.string_field(5)
    """
    Optional. BCP-47 language code of the dominant language in this
    paragraph.
    """


@dataclass(eq=False, repr=False)
class TextEntityIdentifier(betterproto.Message):
    id: int = betterproto.int64_field(1)
    """
    Required. Unique entity id used to reference (and match) text entities and
    ranges.
    """
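

# Illustrative sketch: flattening a TextLayout into a single-line string,
# honoring the text_separator semantics documented on TextLayoutWord
# (a missing separator is distinct from an empty-string separator).
def _example_flatten_text_layout(layout: "TextLayout") -> str:
    pieces: List[str] = []
    for paragraph in layout.paragraphs:
        for line in paragraph.lines:
            for word in line.words:
                pieces.append(word.plain_text)
                if word.text_separator is not None:
                    pieces.append(word.text_separator)
    return "".join(pieces)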


@dataclass(eq=False, repr=False)
class DeepGleamData(betterproto.Message):
    translation: "TranslationData" = betterproto.message_field(
        10, group="rendering_oneof"
    )
    visual_object_id: List[str] = betterproto.string_field(11)


@dataclass(eq=False, repr=False)
class TranslationData(betterproto.Message):
    status: "TranslationDataStatus" = betterproto.message_field(1)
    target_language: str = betterproto.string_field(2)
    source_language: str = betterproto.string_field(3)
    translation: str = betterproto.string_field(4)
    """The translated text."""

    line: List["TranslationDataLine"] = betterproto.message_field(5)
    writing_direction: "WritingDirection" = betterproto.enum_field(7)
    """The original writing direction of the source text."""

    alignment: "Alignment" = betterproto.enum_field(8)
    justified: bool = betterproto.bool_field(9)
    """Whether the text is justified."""


@dataclass(eq=False, repr=False)
class TranslationDataStatus(betterproto.Message):
    code: "TranslationDataStatusCode" = betterproto.enum_field(1)


@dataclass(eq=False, repr=False)
class TranslationDataTextStyle(betterproto.Message):
    """
    Style as the aggregation of the styles of the words in the original text.
    """

    text_color: int = betterproto.uint32_field(1)
    """The foreground color of text in aRGB format."""

    background_primary_color: int = betterproto.uint32_field(2)
    """The background color of text in aRGB format."""


@dataclass(eq=False, repr=False)
class TranslationDataBackgroundImageData(betterproto.Message):
    """Properties of the image used to inpaint the source text."""

    background_image: bytes = betterproto.bytes_field(1)
    """
    Image bytes to inpaint the source text. Contains image bytes in the
    format specified in file_format.
    """

    image_width: int = betterproto.int32_field(2)
    """Width of background_image in pixels."""

    image_height: int = betterproto.int32_field(3)
    """Height of background_image in pixels."""

    vertical_padding: float = betterproto.float_field(4)
    """
    Vertical padding to apply to the text box before drawing the background
    image. Expressed as a fraction of the text box height, i.e. 1.0 means
    that the height should be doubled. Half of the padding should be added on
    the top and half on the bottom.
    """

    horizontal_padding: float = betterproto.float_field(5)
    """
    Horizontal padding to apply to the text box before drawing the background
    image. Expressed as a fraction of the text box height. Half of the
    padding should be added on the left and half on the right.
    """

    file_format: "TranslationDataBackgroundImageDataFileFormat" = (
        betterproto.enum_field(6)
    )
    text_mask: bytes = betterproto.bytes_field(7)
    """Text mask for the generated background image."""


@dataclass(eq=False, repr=False)
class TranslationDataLine(betterproto.Message):
    start: int = betterproto.int32_field(1)
    """
    A substring from the translation from start to end (exclusive),
    that needs to be distributed on this line, measured in Unicode
    characters. If not set, the Line doesn't have any translation.
    """

    end: int = betterproto.int32_field(2)
    style: "TranslationDataTextStyle" = betterproto.message_field(3)
    word: List["TranslationDataLineWord"] = betterproto.message_field(5)
    background_image_data: "TranslationDataBackgroundImageData" = (
        betterproto.message_field(9)
    )
    """Background image data is set only when inpainting is computed."""


@dataclass(eq=False, repr=False)
class TranslationDataLineWord(betterproto.Message):
    start: int = betterproto.int32_field(1)
    """
    A substring from the translation from start to end (exclusive),
    representing a word (without separator), measured in Unicode
    characters.
    """

    end: int = betterproto.int32_field(2)
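

# Illustrative sketch: TranslationDataLine.start/end are Unicode indices into
# TranslationData.translation (end exclusive), so each line's text is a slice
# of the translated string.
def _example_translation_lines(data: "TranslationData") -> List[str]:
    return [data.translation[line.start : line.end] for line in data.line]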


@dataclass(eq=False, repr=False)
class LensOverlayDocument(betterproto.Message):
    """
    Top-level PDF representation extracted using Pdfium.
    Next ID: 6
    """

    pages: List["Page"] = betterproto.message_field(1)
    """Ordered PDF pages."""


@dataclass(eq=False, repr=False)
class Page(betterproto.Message):
    """
    Represents a single page of the PDF.
    Next ID: 10
    """

    page_number: int = betterproto.int32_field(1)
    """Page number in the PDF (indexed starting at 1)."""

    text_segments: List[str] = betterproto.string_field(4)
    """List of text segments of the page."""
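

# Illustrative sketch: wrapping per-page extracted text into the document
# message above. Page numbers are 1-indexed, per the page_number comment;
# where the text comes from is outside the scope of this module.
def _example_document(page_texts: List[List[str]]) -> "LensOverlayDocument":
    return LensOverlayDocument(
        pages=[
            Page(page_number=index + 1, text_segments=segments)
            for index, segments in enumerate(page_texts)
        ]
    )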


@dataclass(eq=False, repr=False)
class ClientImage(betterproto.Message):
    """Image data from the client."""

    image_content: bytes = betterproto.bytes_field(1)
    """Required. A byte array encoding an image."""


@dataclass(eq=False, repr=False)
class ImageCrop(betterproto.Message):
    """User-selected / auto-detected cropped image region."""

    crop_id: str = betterproto.string_field(1)
    """The ID of the cropped image region."""

    image: "ClientImage" = betterproto.message_field(2)
    """The image content of the cropped image region."""

    zoomed_crop: "ZoomedCrop" = betterproto.message_field(3)
    """The zoomed crop properties of the cropped image region."""


@dataclass(eq=False, repr=False)
class ImageData(betterproto.Message):
    """
    Data representing an image. Contains image bytes or an image retrieval
    identifier.
    """

    payload: "ImagePayload" = betterproto.message_field(1)
    """Image payload to process. This contains image bytes."""

    image_metadata: "ImageMetadata" = betterproto.message_field(3)
    """Required. Context of the given image."""

    significant_regions: List["Geometry"] = betterproto.message_field(4)
    """The bounds of significant regions in the image."""


@dataclass(eq=False, repr=False)
class ImagePayload(betterproto.Message):
    image_bytes: bytes = betterproto.bytes_field(1)
    """Required. Image byte array."""


@dataclass(eq=False, repr=False)
class ImageMetadata(betterproto.Message):
    width: int = betterproto.int32_field(1)
    """
    Required. Image width in pixels. Should reflect the actual size of
    image_bytes.
    """

    height: int = betterproto.int32_field(2)
    """
    Required. Image height in pixels. Should reflect the actual size of
    image_bytes.
    """
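

# Illustrative sketch: packaging encoded screenshot bytes as ImageData. The
# metadata width/height must match the encoded image, per the ImageMetadata
# comments; the caller is assumed to know the actual dimensions.
def _example_image_data(encoded_image: bytes, width: int, height: int) -> "ImageData":
    return ImageData(
        payload=ImagePayload(image_bytes=encoded_image),
        image_metadata=ImageMetadata(width=width, height=height),
    )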


@dataclass(eq=False, repr=False)
class TextQuery(betterproto.Message):
    """Contains an unstructured text query to add to an image query."""

    query: str = betterproto.string_field(1)
    """The unstructured text query, such as "blue" or "blouse"."""

    is_primary: bool = betterproto.bool_field(2)


@dataclass(eq=False, repr=False)
class LensOverlayInteractionRequestMetadata(betterproto.Message):
    """Metadata associated with an interaction request."""

    type: "LensOverlayInteractionRequestMetadataType" = betterproto.enum_field(1)
    selection_metadata: "LensOverlayInteractionRequestMetadataSelectionMetadata" = (
        betterproto.message_field(2)
    )
    query_metadata: "LensOverlayInteractionRequestMetadataQueryMetadata" = (
        betterproto.message_field(4)
    )


@dataclass(eq=False, repr=False)
class LensOverlayInteractionRequestMetadataSelectionMetadata(betterproto.Message):
    """
    Metadata related to the selection associated with this interaction request.
    """

    point: "LensOverlayInteractionRequestMetadataSelectionMetadataPoint" = (
        betterproto.message_field(1, group="selection")
    )
    region: "LensOverlayInteractionRequestMetadataSelectionMetadataRegion" = (
        betterproto.message_field(2, group="selection")
    )
    object: "LensOverlayInteractionRequestMetadataSelectionMetadataObject" = (
        betterproto.message_field(3, group="selection")
    )


@dataclass(eq=False, repr=False)
class LensOverlayInteractionRequestMetadataSelectionMetadataPoint(betterproto.Message):
    x: float = betterproto.float_field(1)
    y: float = betterproto.float_field(2)


@dataclass(eq=False, repr=False)
class LensOverlayInteractionRequestMetadataSelectionMetadataRegion(betterproto.Message):
    region: "CenterRotatedBox" = betterproto.message_field(1)


@dataclass(eq=False, repr=False)
class LensOverlayInteractionRequestMetadataSelectionMetadataObject(betterproto.Message):
    object_id: str = betterproto.string_field(1)
    geometry: "Geometry" = betterproto.message_field(2)


@dataclass(eq=False, repr=False)
class LensOverlayInteractionRequestMetadataQueryMetadata(betterproto.Message):
    """Metadata related to the query."""

    text_query: "TextQuery" = betterproto.message_field(2)
    """The text query information."""
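

# Illustrative sketch: a region-search interaction. point/region/object form
# the "selection" oneof group, so only one of them should be populated;
# betterproto.which_one_of(metadata.selection_metadata, "selection") can be
# used to inspect which member was set.
def _example_region_interaction(
    region_box: "CenterRotatedBox",
) -> "LensOverlayInteractionRequestMetadata":
    return LensOverlayInteractionRequestMetadata(
        type=LensOverlayInteractionRequestMetadataType.REGION_SEARCH,
        selection_metadata=LensOverlayInteractionRequestMetadataSelectionMetadata(
            region=LensOverlayInteractionRequestMetadataSelectionMetadataRegion(
                region=region_box
            )
        ),
    )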


@dataclass(eq=False, repr=False)
class TranslateStickinessSignals(betterproto.Message):
    """
    Signals specific to queries coming from the translate stickiness extension.
    """

    translate_suppress_echo_for_sticky: bool = betterproto.bool_field(1)


@dataclass(eq=False, repr=False)
class FunctionCall(betterproto.Message):
    """A message representing the function call of an answers intent query."""

    name: str = betterproto.string_field(1)
    """Name of this function call."""

    argument: List["Argument"] = betterproto.message_field(2)
    """A list of arguments of this function call."""

    signals: "FunctionCallSignals" = betterproto.message_field(4)
    """Signals at the function call level."""


@dataclass(eq=False, repr=False)
class FunctionCallSignals(betterproto.Message):
    """Signals at the function call level."""

    translate_stickiness_signals: "TranslateStickinessSignals" = (
        betterproto.message_field(311378150)
    )


@dataclass(eq=False, repr=False)
class Argument(betterproto.Message):
    """A message representing the function argument."""

    name: str = betterproto.string_field(1)
    """Name of this argument."""

    value: "ArgumentValue" = betterproto.message_field(2)
    """The value of this argument."""


@dataclass(eq=False, repr=False)
class ArgumentValue(betterproto.Message):
    """A message representing the value of an argument."""

    simple_value: "SimpleValue" = betterproto.message_field(3, group="value")


@dataclass(eq=False, repr=False)
class SimpleValue(betterproto.Message):
    """A message representing a simple literal value."""

    string_value: str = betterproto.string_field(1, group="value")


@dataclass(eq=False, repr=False)
class Query(betterproto.Message):
    """A Query is a representation of the meaning of the user query."""

    intent_query: "FunctionCall" = betterproto.message_field(56249026)


@dataclass(eq=False, repr=False)
class MathSolverQuery(betterproto.Message):
    math_input_equation: str = betterproto.string_field(3)


@dataclass(eq=False, repr=False)
class MessageSet(betterproto.Message):
    """This is proto2's version of MessageSet."""

    message_set_extension: "Query" = betterproto.message_field(41401449)


@dataclass(eq=False, repr=False)
class OverlayObject(betterproto.Message):
    """Overlay Object."""

    id: str = betterproto.string_field(1)
    """The id."""

    geometry: "Geometry" = betterproto.message_field(2)
    """The object geometry."""

    rendering_metadata: "OverlayObjectRenderingMetadata" = betterproto.message_field(8)
    """The rendering metadata for the object."""

    interaction_properties: "OverlayObjectInteractionProperties" = (
        betterproto.message_field(4)
    )
    is_fulfilled: bool = betterproto.bool_field(9)
    """
    Indicates to the client that this object is eligible for an object
    fulfillment request.
    """


@dataclass(eq=False, repr=False)
class OverlayObjectRenderingMetadata(betterproto.Message):
    """Rendering metadata for the object."""

    render_type: "OverlayObjectRenderingMetadataRenderType" = betterproto.enum_field(1)


@dataclass(eq=False, repr=False)
class OverlayObjectInteractionProperties(betterproto.Message):
    select_on_tap: bool = betterproto.bool_field(1)
    """Whether an object can be tapped."""


@dataclass(eq=False, repr=False)
class LensOverlayRequestId(betterproto.Message):
    """
    Request Id definition to support request sequencing and state lookup.
    """

    uuid: int = betterproto.uint64_field(1)
    """A unique identifier for a sequence of related Lens requests."""

    sequence_id: int = betterproto.int32_field(2)
    """
    An id to indicate the order of the current request within a sequence of
    requests sharing the same uuid. Starts from 1, increments by 1 if there is
    a new request with the same uuid.
    """

    image_sequence_id: int = betterproto.int32_field(3)
    """
    An id to indicate the order of the image payload sent within a sequence of
    requests sharing the same uuid. Starts from 1, increments by 1 if there is
    a new request with an image payload with the same uuid.
    Note: a region search request does not increment this id.
    """

    analytics_id: bytes = betterproto.bytes_field(4)
    """
    Analytics ID for the Lens request. Will be updated on the initial request
    and once per interaction request.
    """

    routing_info: "LensOverlayRoutingInfo" = betterproto.message_field(6)
    """Information about where to route the request."""


@dataclass(eq=False, repr=False)
class LensOverlayRequestContext(betterproto.Message):
    """Request context for a Lens Overlay request."""

    request_id: "LensOverlayRequestId" = betterproto.message_field(3)
    """Required. Identifiers for this request."""

    client_context: "LensOverlayClientContext" = betterproto.message_field(4)
    """The client context for the request."""
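

# Illustrative sketch of the sequencing rules documented on
# LensOverlayRequestId: uuid stays constant for a sequence, sequence_id
# increments on every request, and image_sequence_id increments only when a
# new image payload is sent. Refreshing analytics_id is out of scope here.
def _example_next_request_id(
    previous: "LensOverlayRequestId", sends_new_image: bool
) -> "LensOverlayRequestId":
    return LensOverlayRequestId(
        uuid=previous.uuid,
        sequence_id=previous.sequence_id + 1,
        image_sequence_id=previous.image_sequence_id + (1 if sends_new_image else 0),
        analytics_id=previous.analytics_id,
        routing_info=previous.routing_info,
    )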


@dataclass(eq=False, repr=False)
class LensOverlayObjectsRequest(betterproto.Message):
    request_context: "LensOverlayRequestContext" = betterproto.message_field(1)
    """Required. Basic information and context for the request."""

    image_data: "ImageData" = betterproto.message_field(3)
    """Required. Image data to process."""

    payload: "Payload" = betterproto.message_field(4)
    """
    Optional. Data payload of the request.
    TODO(b/359638436): Mark required when clients have migrated to use Payload
    field.
    """


@dataclass(eq=False, repr=False)
class LensOverlayObjectsResponse(betterproto.Message):
    overlay_objects: List["OverlayObject"] = betterproto.message_field(2)
    """Overlay objects."""

    text: "Text" = betterproto.message_field(3)
    """Text."""

    deep_gleams: List["DeepGleamData"] = betterproto.message_field(4)
    """Gleams."""

    cluster_info: "LensOverlayClusterInfo" = betterproto.message_field(7)
    """The cluster info."""


@dataclass(eq=False, repr=False)
class LensOverlayInteractionRequest(betterproto.Message):
    request_context: "LensOverlayRequestContext" = betterproto.message_field(1)
    """Basic information and context for the request."""

    interaction_request_metadata: "LensOverlayInteractionRequestMetadata" = (
        betterproto.message_field(2)
    )
    """Metadata associated with an interaction request."""

    image_crop: "ImageCrop" = betterproto.message_field(3)
    """The image crop data."""


@dataclass(eq=False, repr=False)
class LensOverlayInteractionResponse(betterproto.Message):
    encoded_response: str = betterproto.string_field(3)


@dataclass(eq=False, repr=False)
class Payload(betterproto.Message):
    """Next ID: 9"""

    request_type: "PayloadRequestType" = betterproto.enum_field(6)
    """Optional. The type of the request."""

    image_data: "ImageData" = betterproto.message_field(2)
    """
    Currently unset, use image_data in ObjectsRequest.
    TODO(b/359638436): Move ObjectsRequest clients onto Payload.ImageData.
    """

    content_data: bytes = betterproto.bytes_field(3)
    """
    Data for non-image payloads. May be sent with or without an image in the
    image_data field. If content_data is set, content_type must also be set.
    """

    content_type: str = betterproto.string_field(4)
    """
    The media type/MIME type of the data represented in content_data, e.g.
    "application/pdf". If content_type is set, content_data should also be set.
    """

    page_url: str = betterproto.string_field(5)
    """The page URL this request was made on."""

    partial_pdf_document: "LensOverlayDocument" = betterproto.message_field(7)
    """
    The partially parsed PDF document. Used to get early suggest signals. This
    is only set for REQUEST_TYPE_EARLY_PARTIAL_PDF.
    """

    compression_type: "PayloadCompressionType" = betterproto.enum_field(8)
    """
    Compression format of content_data. Currently only used for PDF data.
    """
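

# Illustrative sketch: a PDF payload. content_data and content_type must be
# set together per the comments above; the bytes here are assumed to be an
# uncompressed PDF (ZSTD would require compressing content_data first).
def _example_pdf_payload(pdf_bytes: bytes, page_url: str) -> "Payload":
    return Payload(
        request_type=PayloadRequestType.REQUEST_TYPE_PDF,
        content_data=pdf_bytes,
        content_type="application/pdf",
        page_url=page_url,
        compression_type=PayloadCompressionType.UNCOMPRESSED,
    )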


@dataclass(eq=False, repr=False)
class LensOverlayServerClusterInfoRequest(betterproto.Message):
    """The cluster info request for a Lens Overlay session."""

    enable_search_session_id: bool = betterproto.bool_field(1)
    """
    Whether to return a search session id alongside the server session id.
    """


@dataclass(eq=False, repr=False)
class LensOverlayServerClusterInfoResponse(betterproto.Message):
    server_session_id: str = betterproto.string_field(1)
    """ID for subsequent server requests."""

    search_session_id: str = betterproto.string_field(2)
    """ID for subsequent search requests."""

    routing_info: "LensOverlayRoutingInfo" = betterproto.message_field(3)
    """The routing info for the server session."""


@dataclass(eq=False, repr=False)
class LensOverlayServerError(betterproto.Message):
    """
    An error encountered while handling a request.
    Next ID: 2
    """

    error_type: "LensOverlayServerErrorErrorType" = betterproto.enum_field(1)
    """The error type."""


@dataclass(eq=False, repr=False)
class LensOverlayServerRequest(betterproto.Message):
    """Next ID: 4"""

    objects_request: "LensOverlayObjectsRequest" = betterproto.message_field(1)
    """Options for fetching objects."""

    interaction_request: "LensOverlayInteractionRequest" = betterproto.message_field(2)
    """Options for fetching interactions."""

    client_logs: "LensOverlayClientLogs" = betterproto.message_field(3)
    """Client logs for the request."""


@dataclass(eq=False, repr=False)
class LensOverlayServerResponse(betterproto.Message):
    """
    Response details for a LensOverlay request.
    Next ID: 4
    """

    error: "LensOverlayServerError" = betterproto.message_field(1)
    """The encountered error."""

    objects_response: "LensOverlayObjectsResponse" = betterproto.message_field(2)
    """The objects response."""

    interaction_response: "LensOverlayInteractionResponse" = betterproto.message_field(
        3
    )
    """The interaction response."""
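

# Illustrative sketch of the wire round-trip with betterproto: bytes()
# serializes a message and parse() fills one from serialized data. The
# transport that carries these bytes is outside the scope of this module.
def _example_serialize_request(objects_request: "LensOverlayObjectsRequest") -> bytes:
    request = LensOverlayServerRequest(objects_request=objects_request)
    return bytes(request)


def _example_read_response(serialized: bytes) -> "LensOverlayObjectsResponse":
    response = LensOverlayServerResponse().parse(serialized)
    # serialized_on_wire() reports whether the error submessage was actually
    # present on the wire, as opposed to being an unset default.
    if betterproto.serialized_on_wire(response.error):
        raise RuntimeError(f"server error: {response.error.error_type!r}")
    return response.objects_response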


@dataclass(eq=False, repr=False)
class StickinessSignals(betterproto.Message):
    id_namespace: "StickinessSignalsNamespace" = betterproto.enum_field(1)
    interpretation: "MessageSet" = betterproto.message_field(28)
    education_input_extension: "EducationInputExtension" = betterproto.message_field(
        121
    )


@dataclass(eq=False, repr=False)
class EducationInputExtension(betterproto.Message):
    math_solver_query: "MathSolverQuery" = betterproto.message_field(1)


@dataclass(eq=False, repr=False)
class LensOverlayVideoContextInputParams(betterproto.Message):
    url: str = betterproto.string_field(1)
    """URL of the video."""


@dataclass(eq=False, repr=False)
class LensOverlayVideoParams(betterproto.Message):
    video_context_input_params: "LensOverlayVideoContextInputParams" = (
        betterproto.message_field(1)
    )
    """Video context params from input."""


@dataclass(eq=False, repr=False)
class LensOverlayVisualSearchInteractionLogData(betterproto.Message):
    """Log data for a Lens Overlay visual search interaction."""

    filter_data: "FilterData" = betterproto.message_field(1)
    """Filter related metadata."""

    user_selection_data: "UserSelectionData" = betterproto.message_field(2)
    """User selection metadata."""

    is_parent_query: bool = betterproto.bool_field(3)
    """Whether the query is a parent query."""

    client_platform: "ClientPlatform" = betterproto.enum_field(4)
    """The client platform this query originated from."""


@dataclass(eq=False, repr=False)
class FilterData(betterproto.Message):
    """
    Filter data.
    Next ID: 2
    """

    filter_type: "LensOverlayFilterType" = betterproto.enum_field(1)
    """
    The filter type associated with this interaction (auto, translate, etc.).
    """


@dataclass(eq=False, repr=False)
class UserSelectionData(betterproto.Message):
    """
    User selection data.
    Next ID: 2
    """

    selection_type: "LensOverlaySelectionType" = betterproto.enum_field(1)
    """
    The selection type associated with this interaction (e.g. region search).
    """


@dataclass(eq=False, repr=False)
class LensOverlayVisualSearchInteractionData(betterproto.Message):
    """Metadata associated with a Lens Visual Search request."""

    interaction_type: "LensOverlayInteractionRequestMetadataType" = (
        betterproto.enum_field(1)
    )
    """The type of interaction."""

    zoomed_crop: "ZoomedCrop" = betterproto.message_field(7)
    """The selected region for this interaction, instead of the object id."""

    object_id: str = betterproto.string_field(3)
    """
    The selected object id for this interaction, instead of the zoomed crop.
    Currently unsupported and should not be populated.
    """

    log_data: "LensOverlayVisualSearchInteractionLogData" = betterproto.message_field(5)
    """Logging-specific data."""