# generated by fastapi-codegen:
#   filename:  openapi.yaml
#   timestamp: 2025-06-29T03:02:49+00:00
from __future__ import annotations
from enum import Enum
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field, RootModel
class AndroidAppInfo(BaseModel):
    name: Optional[str] = Field(None, description='The name of the app. Optional')
    packageName: Optional[str] = Field(
        None, description='The package name of the app. Required.'
    )
    versionCode: Optional[str] = Field(
        None, description='The internal version code of the app. Optional.'
    )
    versionName: Optional[str] = Field(
        None, description='The version name of the app. Optional.'
    )
class AndroidInstrumentationTest(BaseModel):
    testPackageId: Optional[str] = Field(
        None, description='The java package for the test to be executed. Required'
    )
    testRunnerClass: Optional[str] = Field(
        None, description='The InstrumentationTestRunner class. Required'
    )
    testTargets: Optional[List[str]] = Field(
        None,
        description='Each target must be fully qualified with the package name or class name, in one of these formats: - "package package_name" - "class package_name.class_name" - "class package_name.class_name#method_name" If empty, all targets in the module will be run.',
    )
    useOrchestrator: Optional[bool] = Field(
        None,
        description='The flag indicates whether Android Test Orchestrator will be used to run the test or not.',
    )
class AndroidRoboTest(BaseModel):
    appInitialActivity: Optional[str] = Field(
        None,
        description='The initial activity that should be used to start the app. Optional',
    )
    bootstrapPackageId: Optional[str] = Field(
        None, description='The java package for the bootstrap. Optional'
    )
    bootstrapRunnerClass: Optional[str] = Field(
        None, description='The runner class for the bootstrap. Optional'
    )
    maxDepth: Optional[int] = Field(
        None,
        description='The max depth of the traversal stack Robo can explore. Optional',
    )
    maxSteps: Optional[int] = Field(
        None,
        description='The max number of steps/actions Robo can execute. Default is no limit (0). Optional',
    )
class AndroidTestLoop(BaseModel):
    pass
class AnyModel(BaseModel):
    typeUrl: Optional[str] = Field(
        None,
        description='A URL/resource name that uniquely identifies the type of the serialized protocol buffer message. This string must contain at least one "/" character. The last segment of the URL\'s path must represent the fully qualified name of the type (as in `path/google.protobuf.Duration`). The name should be in a canonical form (e.g., leading "." is not accepted). In practice, teams usually precompile into the binary all types that they expect it to use in the context of Any. However, for URLs which use the scheme `http`, `https`, or no scheme, one can optionally set up a type server that maps type URLs to message definitions as follows: * If no scheme is provided, `https` is assumed. * An HTTP GET on the URL must yield a google.protobuf.Type value in binary format, or produce an error. * Applications are allowed to cache lookup results based on the URL, or have them precompiled into a binary to avoid any lookup. Therefore, binary compatibility needs to be preserved on changes to types. (Use versioned type names to manage breaking changes.) Note: this functionality is not currently available in the official protobuf release, and it is not used for type URLs beginning with type.googleapis.com. Schemes other than `http`, `https` (or the empty scheme) might be used with implementation specific semantics.',
    )
    value: Optional[str] = Field(
        None,
        description='Must be a valid serialized protocol buffer of the above specified type.',
    )
class AvailableDeepLinks(BaseModel):
    pass
class PerfMetricType(Enum):
    perfMetricTypeUnspecified = 'perfMetricTypeUnspecified'
    memory = 'memory'
    cpu = 'cpu'
    network = 'network'
    graphics = 'graphics'
class PerfUnit(Enum):
    perfUnitUnspecified = 'perfUnitUnspecified'
    kibibyte = 'kibibyte'
    percent = 'percent'
    bytesPerSecond = 'bytesPerSecond'
    framesPerSecond = 'framesPerSecond'
    byte = 'byte'
class SampleSeriesLabel(Enum):
    sampleSeriesTypeUnspecified = 'sampleSeriesTypeUnspecified'
    memoryRssPrivate = 'memoryRssPrivate'
    memoryRssShared = 'memoryRssShared'
    memoryRssTotal = 'memoryRssTotal'
    memoryTotal = 'memoryTotal'
    cpuUser = 'cpuUser'
    cpuKernel = 'cpuKernel'
    cpuTotal = 'cpuTotal'
    ntBytesTransferred = 'ntBytesTransferred'
    ntBytesReceived = 'ntBytesReceived'
    networkSent = 'networkSent'
    networkReceived = 'networkReceived'
    graphicsFrameRate = 'graphicsFrameRate'
class BasicPerfSampleSeries(BaseModel):
    perfMetricType: Optional[PerfMetricType] = None
    perfUnit: Optional[PerfUnit] = None
    sampleSeriesLabel: Optional[SampleSeriesLabel] = None
class BlankScreen(BaseModel):
    screenId: Optional[str] = Field(None, description='The screen id of the element')
class CPUInfo(BaseModel):
    cpuProcessor: Optional[str] = Field(
        None,
        description="description of the device processor ie '1.8 GHz hexa core 64-bit ARMv8-A'",
    )
    cpuSpeedInGhz: Optional[float] = Field(
        None, description='the CPU clock speed in GHz'
    )
    numberOfCores: Optional[int] = Field(None, description='the number of CPU cores')
class CrashDialogError(BaseModel):
    crashPackage: Optional[str] = Field(
        None, description='The name of the package that caused the dialog.'
    )
class DetectedAppSplashScreen(BaseModel):
    pass
class DeviceOutOfMemory(BaseModel):
    pass
class Duration(BaseModel):
    nanos: Optional[int] = Field(
        None,
        description='Signed fractions of a second at nanosecond resolution of the span of time. Durations less than one second are represented with a 0 `seconds` field and a positive or negative `nanos` field. For durations of one second or more, a non-zero value for the `nanos` field must be of the same sign as the `seconds` field. Must be from -999,999,999 to +999,999,999 inclusive.',
    )
    seconds: Optional[str] = Field(
        None,
        description='Signed seconds of the span of time. Must be from -315,576,000,000 to +315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years',
    )
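# Illustrative helper (not part of the generated API): Duration carries `seconds`
# as a decimal string and `nanos` as an int, so converting to a native timedelta
# is a common post-processing step. A minimal sketch, treating unset fields as
# zero and assuming in-range values.
def duration_to_timedelta(duration: Duration) -> 'timedelta':
    from datetime import timedelta  # local import keeps the generated header untouched

    return timedelta(
        seconds=int(duration.seconds or '0'),
        microseconds=(duration.nanos or 0) / 1000,
    )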
class EncounteredLoginScreen(BaseModel):
    distinctScreens: Optional[int] = Field(
        None, description='Number of encountered distinct login screens.'
    )
    screenIds: Optional[List[str]] = Field(None, description='Subset of login screens.')
class EncounteredNonAndroidUiWidgetScreen(BaseModel):
    distinctScreens: Optional[int] = Field(
        None,
        description='Number of encountered distinct screens with non Android UI widgets.',
    )
    screenIds: Optional[List[str]] = Field(
        None, description='Subset of screens which contain non Android UI widgets.'
    )
class EnvironmentDimensionValueEntry(BaseModel):
    key: Optional[str] = None
    value: Optional[str] = None
class State(Enum):
    unknownState = 'unknownState'
    pending = 'pending'
    inProgress = 'inProgress'
    complete = 'complete'
class FailedToInstall(BaseModel):
    pass
class FailureDetail(BaseModel):
    crashed: Optional[bool] = Field(
        None,
        description='If the failure was severe because the system (app) under test crashed.',
    )
    deviceOutOfMemory: Optional[bool] = Field(
        None,
        description='If the device ran out of memory during a test, causing the test to crash.',
    )
    failedRoboscript: Optional[bool] = Field(
        None,
        description='If the Roboscript failed to complete successfully, e.g., because a Roboscript action or assertion failed or a Roboscript action could not be matched during the entire crawl.',
    )
    notInstalled: Optional[bool] = Field(
        None,
        description='If an app is not installed and thus no test can be run with the app. This might be caused by trying to run a test on an unsupported platform.',
    )
    otherNativeCrash: Optional[bool] = Field(
        None,
        description='If a native process (including any other than the app) crashed.',
    )
    timedOut: Optional[bool] = Field(
        None,
        description='If the test overran some time limit, and that is why it failed.',
    )
    unableToCrawl: Optional[bool] = Field(
        None,
        description='If Robo was unable to crawl the app, perhaps because the app did not start.',
    )
class FileReference(BaseModel):
    fileUri: Optional[str] = Field(
        None,
        description='The URI of a file stored in Google Cloud Storage. For example: http://storage.googleapis.com/mybucket/path/to/test.xml or in gsutil format: gs://mybucket/path/to/test.xml with version-specific info, gs://mybucket/path/to/test.xml#1360383693690000 An INVALID_ARGUMENT error will be returned if the URI format is not supported. - In response: always set - In create/update request: always set',
    )
class GraphicsStatsBucket(BaseModel):
    frameCount: Optional[str] = Field(
        None, description='Number of frames in the bucket.'
    )
    renderMillis: Optional[str] = Field(
        None, description='Lower bound of render time in milliseconds.'
    )
class TestPlatform(Enum):
    unknownPlatform = 'unknownPlatform'
    android = 'android'
    ios = 'ios'
class History(BaseModel):
    displayName: Optional[str] = Field(
        None,
        description='A short human-readable (plain text) name to display in the UI. Maximum of 100 characters. - In response: present if set during create. - In create request: optional',
    )
    historyId: Optional[str] = Field(
        None,
        description='A unique identifier within a project for this History. Returns INVALID_ARGUMENT if this field is set or overwritten by the caller. - In response always set - In create request: never set',
    )
    name: Optional[str] = Field(
        None,
        description='A name to uniquely identify a history within a project. Maximum of 200 characters. - In response always set - In create request: always set',
    )
    testPlatform: Optional[TestPlatform] = Field(
        None,
        description='The platform of the test history. - In response: always set. Returns the platform of the last execution if unknown.',
    )
class InAppPurchasesFound(BaseModel):
    inAppPurchasesFlowsExplored: Optional[int] = Field(
        None,
        description='The total number of in-app purchases flows explored: how many times the robo tries to buy a SKU.',
    )
    inAppPurchasesFlowsStarted: Optional[int] = Field(
        None, description='The total number of in-app purchases flows started.'
    )
class InconclusiveDetail(BaseModel):
    abortedByUser: Optional[bool] = Field(
        None,
        description='If the end user aborted the test execution before a pass or fail could be determined. For example, the user pressed ctrl-c which sent a kill signal to the test runner while the test was running.',
    )
    hasErrorLogs: Optional[bool] = Field(
        None,
        description='If results are being provided to the user in certain cases of infrastructure failures',
    )
    infrastructureFailure: Optional[bool] = Field(
        None,
        description='If the test runner could not determine success or failure because the test depends on a component other than the system under test which failed. For example, a mobile test requires provisioning a device where the test executes, and that provisioning can fail.',
    )
class OutcomeSummary(Enum):
    unset = 'unset'
    success = 'success'
    failure = 'failure'
    inconclusive = 'inconclusive'
    skipped = 'skipped'
    flaky = 'flaky'
class IndividualOutcome(BaseModel):
    multistepNumber: Optional[int] = Field(
        None,
        description='Unique int given to each step. Ranges from 0 (inclusive) to the total number of steps (exclusive). The primary step is 0.',
    )
    outcomeSummary: Optional[OutcomeSummary] = None
    runDuration: Optional[Duration] = Field(
        None, description='How long it took for this step to run.'
    )
    stepId: Optional[str] = None
class InsufficientCoverage(BaseModel):
    pass
class IosAppInfo(BaseModel):
    name: Optional[str] = Field(None, description='The name of the app. Required')
class IosRoboTest(BaseModel):
    pass
class IosTestLoop(BaseModel):
    bundleId: Optional[str] = Field(None, description='Bundle ID of the app.')
class IosXcTest(BaseModel):
    bundleId: Optional[str] = Field(None, description='Bundle ID of the app.')
    xcodeVersion: Optional[str] = Field(
        None, description='Xcode version that the test was run with.'
    )
class LauncherActivityNotFound(BaseModel):
    pass
class ListHistoriesResponse(BaseModel):
    histories: Optional[List[History]] = Field(None, description='Histories.')
    nextPageToken: Optional[str] = Field(
        None,
        description='A continuation token to resume the query at the next item. Will only be set if there are more histories to fetch. Tokens are valid for up to one hour from the time of the first list request. For instance, if you make a list request at 1PM and use the token from this first request 10 minutes later, the token from this second response will only be valid for 50 minutes.',
    )
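# Illustrative pagination sketch (not part of the generated API): nextPageToken
# is only set while more histories remain, so callers loop until it is absent.
# `fetch_histories_page` is a hypothetical client callable assumed to accept a
# page token and return a ListHistoriesResponse.
def collect_all_histories(fetch_histories_page) -> List[History]:
    histories: List[History] = []
    token: Optional[str] = None
    while True:
        page: ListHistoriesResponse = fetch_histories_page(page_token=token)
        histories.extend(page.histories or [])
        token = page.nextPageToken
        if not token:
            return histories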
class LogcatCollectionError(BaseModel):
    pass
class MatrixDimensionDefinition(BaseModel):
    pass
class MemoryInfo(BaseModel):
    memoryCapInKibibyte: Optional[str] = Field(
        None, description='Maximum memory that can be allocated to the process in KiB'
    )
    memoryTotalInKibibyte: Optional[str] = Field(
        None, description='Total memory available on the device in KiB'
    )
class ListModel(Enum):
    NONE = 'NONE'
    WHITE = 'WHITE'
    BLACK = 'BLACK'
    GREY = 'GREY'
    GREY_MAX_O = 'GREY_MAX_O'
    GREY_MAX_P = 'GREY_MAX_P'
    GREY_MAX_Q = 'GREY_MAX_Q'
    GREY_MAX_R = 'GREY_MAX_R'
class NonSdkApiUsageViolation(BaseModel):
    apiSignatures: Optional[List[str]] = Field(
        None, description="Signatures of a subset of those hidden API's."
    )
    uniqueApis: Optional[int] = Field(
        None, description="Total number of unique hidden API's accessed."
    )
class Summary(Enum):
    unset = 'unset'
    success = 'success'
    failure = 'failure'
    inconclusive = 'inconclusive'
    skipped = 'skipped'
    flaky = 'flaky'
class OverlappingUIElements(BaseModel):
    resourceName: Optional[List[str]] = Field(
        None, description='Resource names of the overlapping screen elements'
    )
    screenId: Optional[str] = Field(None, description='The screen id of the elements')
class PendingGoogleUpdateInsight(BaseModel):
    nameOfGoogleLibrary: Optional[str] = Field(
        None,
        description='The name of the Google-provided library with the non-SDK API dependency.',
    )
class PerfEnvironment(BaseModel):
    cpuInfo: Optional[CPUInfo] = Field(None, description='CPU related environment info')
    memoryInfo: Optional[MemoryInfo] = Field(
        None, description='Memory related environment info'
    )
class PerfMetric(Enum):
    perfMetricTypeUnspecified = 'perfMetricTypeUnspecified'
    memory = 'memory'
    cpu = 'cpu'
    network = 'network'
    graphics = 'graphics'
class PerfSampleSeries(BaseModel):
    basicPerfSampleSeries: Optional[BasicPerfSampleSeries] = Field(
        None, description='Basic series represented by a line chart'
    )
    executionId: Optional[str] = Field(
        None, description='A tool results execution ID. @OutputOnly'
    )
    historyId: Optional[str] = Field(
        None, description='A tool results history ID. @OutputOnly'
    )
    projectId: Optional[str] = Field(None, description='The cloud project @OutputOnly')
    sampleSeriesId: Optional[str] = Field(
        None, description='A sample series id @OutputOnly'
    )
    stepId: Optional[str] = Field(
        None, description='A tool results step ID. @OutputOnly'
    )
class PerformedGoogleLogin(BaseModel):
    pass
class PerformedMonkeyActions(BaseModel):
    totalActions: Optional[int] = Field(
        None,
        description='The total number of monkey actions performed during the crawl.',
    )
class RollUp(Enum):
    unset = 'unset'
    success = 'success'
    failure = 'failure'
    inconclusive = 'inconclusive'
    skipped = 'skipped'
    flaky = 'flaky'
class PrimaryStep(BaseModel):
    individualOutcome: Optional[List[IndividualOutcome]] = Field(
        None, description='Step Id and outcome of each individual step.'
    )
    rollUp: Optional[RollUp] = Field(
        None,
        description='Rollup test status of multiple steps that were run with the same configuration as a group.',
    )
class ProjectSettings(BaseModel):
    defaultBucket: Optional[str] = Field(
        None,
        description='The name of the Google Cloud Storage bucket to which results are written. By default, this is unset. In update request: optional In response: optional',
    )
    name: Optional[str] = Field(
        None,
        description="The name of the project's settings. Always of the form: projects/{project-id}/settings In update request: never set In response: always set",
    )
class PublishXunitXmlFilesRequest(BaseModel):
    xunitXmlFiles: Optional[List[FileReference]] = Field(
        None,
        description='URI of the Xunit XML files to publish. The maximum size of the file this reference is pointing to is 50MB. Required.',
    )
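# Illustrative helper (not part of the generated API): publishing xUnit results
# wraps each Cloud Storage URI (gs://bucket/path form, see FileReference.fileUri)
# in a FileReference. A minimal sketch; the URIs are supplied by the caller.
def make_publish_request(gcs_uris: List[str]) -> PublishXunitXmlFilesRequest:
    return PublishXunitXmlFilesRequest(
        xunitXmlFiles=[FileReference(fileUri=uri) for uri in gcs_uris]
    )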
class RegionProto(BaseModel):
    heightPx: Optional[int] = Field(
        None, description='The height, in pixels. Always set.'
    )
    leftPx: Optional[int] = Field(
        None, description='The left side of the rectangle, in pixels. Always set.'
    )
    topPx: Optional[int] = Field(
        None, description='The top of the rectangle, in pixels. Always set.'
    )
    widthPx: Optional[int] = Field(
        None, description='The width, in pixels. Always set.'
    )
class ResultsStorage(BaseModel):
    resultsStoragePath: Optional[FileReference] = Field(
        None, description='The root directory for test results.'
    )
    xunitXmlFile: Optional[FileReference] = Field(
        None, description='The path to the Xunit XML file.'
    )
class RoboScriptExecution(BaseModel):
    successfulActions: Optional[int] = Field(
        None, description='The number of Robo script actions executed successfully.'
    )
    totalActions: Optional[int] = Field(
        None, description='The total number of actions in the Robo script.'
    )
class SafeHtmlProto(BaseModel):
    privateDoNotAccessOrElseSafeHtmlWrappedValue: Optional[str] = Field(
        None,
        description='IMPORTANT: Never set or read this field, even from tests, it is private. See documentation at the top of .proto file for programming language packages with which to create or read this message.',
    )
class Screen(BaseModel):
    fileReference: Optional[str] = Field(
        None, description='File reference of the png file. Required.'
    )
    locale: Optional[str] = Field(
        None,
        description='Locale of the device that the screenshot was taken on. Required.',
    )
    model: Optional[str] = Field(
        None,
        description='Model of the device that the screenshot was taken on. Required.',
    )
    version: Optional[str] = Field(
        None,
        description='OS version of the device that the screenshot was taken on. Required.',
    )
class ScreenshotCluster(BaseModel):
    activity: Optional[str] = Field(
        None,
        description='A string that describes the activity of every screen in the cluster.',
    )
    clusterId: Optional[str] = Field(
        None, description='A unique identifier for the cluster. @OutputOnly'
    )
    keyScreen: Optional[Screen] = Field(
        None,
        description='A singular screen that represents the cluster as a whole. This screen will act as the "cover" of the entire cluster. When users look at the clusters, only the key screen from each cluster will be shown. Which screen is the key screen is determined by the ClusteringAlgorithm',
    )
    screens: Optional[List[Screen]] = Field(None, description='Full list of screens.')
class SkippedDetail(BaseModel):
    incompatibleAppVersion: Optional[bool] = Field(
        None, description="If the App doesn't support the specific API level."
    )
    incompatibleArchitecture: Optional[bool] = Field(
        None,
        description="If the App doesn't run on the specific architecture, for example, x86.",
    )
    incompatibleDevice: Optional[bool] = Field(
        None,
        description="If the requested OS version doesn't run on the specific device model.",
    )
class StackTrace(BaseModel):
    exception: Optional[str] = Field(
        None, description='The stack trace message. Required'
    )
class StartActivityNotFound(BaseModel):
    action: Optional[str] = None
    uri: Optional[str] = None
class Status(BaseModel):
    code: Optional[int] = Field(
        None,
        description='The status code, which should be an enum value of google.rpc.Code.',
    )
    details: Optional[List[Dict[str, Any]]] = Field(
        None,
        description='A list of messages that carry the error details. There is a common set of message types for APIs to use.',
    )
    message: Optional[str] = Field(
        None,
        description='A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.',
    )
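# Illustrative helper (not part of the generated API): Status follows the
# google.rpc.Status shape (numeric code, developer-facing message, detail
# dicts), so a compact string form is often enough for client-side logs.
# A minimal sketch.
def format_status(status: Status) -> str:
    return f"rpc error {status.code}: {status.message or '<no message>'}"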
class StepDimensionValueEntry(BaseModel):
    key: Optional[str] = None
    value: Optional[str] = None
class StepLabelsEntry(BaseModel):
    key: Optional[str] = None
    value: Optional[str] = None
class StepSummary(BaseModel):
    pass
class SuccessDetail(BaseModel):
    otherNativeCrash: Optional[bool] = Field(
        None, description='If a native process other than the app crashed.'
    )
class Category(Enum):
    unknownCategory = 'unknownCategory'
    contentLabeling = 'contentLabeling'
    touchTargetSize = 'touchTargetSize'
    lowContrast = 'lowContrast'
    implementation = 'implementation'
class Priority(Enum):
    unknownPriority = 'unknownPriority'
    error = 'error'
    warning = 'warning'
    info = 'info'
class SuggestionProto(BaseModel):
    helpUrl: Optional[str] = Field(
        None,
        description='Reference to a help center article concerning this type of suggestion. Always set.',
    )
    longMessage: Optional[SafeHtmlProto] = Field(
        None,
        description="Message, in the user's language, explaining the suggestion, which may contain markup. Always set.",
    )
    priority: Optional[Priority] = Field(
        None, description='Relative importance of a suggestion. Always set.'
    )
    pseudoResourceId: Optional[str] = Field(
        None,
        description='A somewhat human readable identifier of the source view, if it does not have a resource_name. This is a path within the accessibility hierarchy, an element with resource name; similar to an XPath.',
    )
    region: Optional[RegionProto] = Field(
        None,
        description='Region within the screenshot that is relevant to this suggestion. Optional.',
    )
    resourceName: Optional[str] = Field(
        None,
        description='Reference to a view element, identified by its resource name, if it has one.',
    )
    screenId: Optional[str] = Field(
        None,
        description='ID of the screen for the suggestion. It is used for getting the corresponding screenshot path. For example, screen_id "1" corresponds to "1.png" file in GCS. Always set.',
    )
    secondaryPriority: Optional[float] = Field(
        None,
        description='Relative importance of a suggestion as compared with other suggestions that have the same priority and category. This is a meaningless value that can be used to order suggestions that are in the same category and have the same priority. The larger values have higher priority (i.e., are more important). Optional.',
    )
    shortMessage: Optional[SafeHtmlProto] = Field(
        None,
        description="Concise message, in the user's language, representing the suggestion, which may contain markup. Always set.",
    )
    title: Optional[str] = Field(
        None,
        description="General title for the suggestion, in the user's language, without markup. Always set.",
    )
class Status1(Enum):
    passed = 'passed'
    failed = 'failed'
    error = 'error'
    skipped = 'skipped'
    flaky = 'flaky'
class TestCaseReference(BaseModel):
    className: Optional[str] = Field(None, description='The name of the class.')
    name: Optional[str] = Field(
        None, description='The name of the test case. Required.'
    )
    testSuiteName: Optional[str] = Field(
        None, description='The name of the test suite to which this test case belongs.'
    )
class Category1(Enum):
    unspecifiedCategory = 'unspecifiedCategory'
    common = 'common'
    robo = 'robo'
class Severity(Enum):
    unspecifiedSeverity = 'unspecifiedSeverity'
    info = 'info'
    suggestion = 'suggestion'
    warning = 'warning'
    severe = 'severe'
class Type(Enum):
    unspecifiedType = 'unspecifiedType'
    fatalException = 'fatalException'
    nativeCrash = 'nativeCrash'
    anr = 'anr'
    unusedRoboDirective = 'unusedRoboDirective'
    compatibleWithOrchestrator = 'compatibleWithOrchestrator'
    launcherActivityNotFound = 'launcherActivityNotFound'
    startActivityNotFound = 'startActivityNotFound'
    incompleteRoboScriptExecution = 'incompleteRoboScriptExecution'
    completeRoboScriptExecution = 'completeRoboScriptExecution'
    failedToInstall = 'failedToInstall'
    availableDeepLinks = 'availableDeepLinks'
    nonSdkApiUsageViolation = 'nonSdkApiUsageViolation'
    nonSdkApiUsageReport = 'nonSdkApiUsageReport'
    encounteredNonAndroidUiWidgetScreen = 'encounteredNonAndroidUiWidgetScreen'
    encounteredLoginScreen = 'encounteredLoginScreen'
    performedGoogleLogin = 'performedGoogleLogin'
    iosException = 'iosException'
    iosCrash = 'iosCrash'
    performedMonkeyActions = 'performedMonkeyActions'
    usedRoboDirective = 'usedRoboDirective'
    usedRoboIgnoreDirective = 'usedRoboIgnoreDirective'
    insufficientCoverage = 'insufficientCoverage'
    inAppPurchases = 'inAppPurchases'
    crashDialogError = 'crashDialogError'
    uiElementsTooDeep = 'uiElementsTooDeep'
    blankScreen = 'blankScreen'
    overlappingUiElements = 'overlappingUiElements'
    unityException = 'unityException'
    deviceOutOfMemory = 'deviceOutOfMemory'
    logcatCollectionError = 'logcatCollectionError'
    detectedAppSplashScreen = 'detectedAppSplashScreen'
class TestIssue(BaseModel):
    category: Optional[Category1] = Field(
        None, description='Category of issue. Required.'
    )
    errorMessage: Optional[str] = Field(
        None,
        description='A brief human-readable message describing the issue. Required.',
    )
    severity: Optional[Severity] = Field(
        None, description='Severity of issue. Required.'
    )
    stackTrace: Optional[StackTrace] = Field(
        None,
        description='Deprecated in favor of stack trace fields inside specific warnings.',
    )
    type: Optional[Type] = Field(None, description='Type of issue. Required.')
    warning: Optional[AnyModel] = Field(
        None,
        description='Warning message with additional details of the issue. Should always be a message from com.google.devtools.toolresults.v1.warnings',
    )
class TestSuiteOverview(BaseModel):
    elapsedTime: Optional[Duration] = Field(
        None, description='Elapsed time of test suite.'
    )
    errorCount: Optional[int] = Field(
        None,
        description='Number of test cases in error, typically set by the service by parsing the xml_source. - In create/response: always set - In update request: never',
    )
    failureCount: Optional[int] = Field(
        None,
        description='Number of failed test cases, typically set by the service by parsing the xml_source. May also be set by the user. - In create/response: always set - In update request: never',
    )
    flakyCount: Optional[int] = Field(
        None,
        description='Number of flaky test cases, set by the service by rolling up flaky test attempts. Present only for rollup test suite overview at environment level. A step cannot have flaky test cases.',
    )
    name: Optional[str] = Field(
        None,
        description='The name of the test suite. - In create/response: always set - In update request: never',
    )
    skippedCount: Optional[int] = Field(
        None,
        description='Number of test cases not run, typically set by the service by parsing the xml_source. - In create/response: always set - In update request: never',
    )
    totalCount: Optional[int] = Field(
        None,
        description='Number of test cases, typically set by the service by parsing the xml_source. - In create/response: always set - In update request: never',
    )
    xmlSource: Optional[FileReference] = Field(
        None,
        description='If this test suite was parsed from XML, this is the URI where the original XML file is stored. Note: Multiple test suites can share the same xml_source Returns INVALID_ARGUMENT if the uri format is not supported. - In create/response: optional - In update request: never',
    )
class TestTiming(BaseModel):
    testProcessDuration: Optional[Duration] = Field(
        None,
        description='How long it took to run the test process. - In response: present if previously set. - In create/update request: optional',
    )
class Thumbnail(BaseModel):
    contentType: Optional[str] = Field(
        None, description='The thumbnail\'s content type, i.e. "image/png". Always set.'
    )
    data: Optional[str] = Field(
        None,
        description='The thumbnail file itself. That is, the bytes here are precisely the bytes that make up the thumbnail file; they can be served as an image as-is (with the appropriate content type.) Always set.',
    )
    heightPx: Optional[int] = Field(
        None, description='The height of the thumbnail, in pixels. Always set.'
    )
    widthPx: Optional[int] = Field(
        None, description='The width of the thumbnail, in pixels. Always set.'
    )
class Timestamp(BaseModel):
    nanos: Optional[int] = Field(
        None,
        description='Non-negative fractions of a second at nanosecond resolution. Negative second values with fractions must still have non-negative nanos values that count forward in time. Must be from 0 to 999,999,999 inclusive.',
    )
    seconds: Optional[str] = Field(
        None,
        description='Represents seconds of UTC time since Unix epoch 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive.',
    )
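# Illustrative helper (not part of the generated API): Timestamp stores epoch
# `seconds` as a decimal string plus non-negative `nanos`, so an aware datetime
# is often more convenient downstream. A minimal sketch, treating unset fields
# as zero.
def timestamp_to_datetime(ts: Timestamp) -> 'datetime':
    from datetime import datetime, timezone  # local import keeps the generated header untouched

    return datetime.fromtimestamp(
        int(ts.seconds or '0') + (ts.nanos or 0) / 1e9, tz=timezone.utc
    )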
class ToolExitCode(BaseModel):
    number: Optional[int] = Field(
        None,
        description='Tool execution exit code. A value of 0 means that the execution was successful. - In response: always set - In create/update request: always set',
    )
class ToolOutputReference(BaseModel):
    creationTime: Optional[Timestamp] = Field(
        None,
        description='The creation time of the file. - In response: present if set by create/update request - In create/update request: optional',
    )
    output: Optional[FileReference] = Field(
        None,
        description='A FileReference to an output file. - In response: always set - In create/update request: always set',
    )
    testCase: Optional[TestCaseReference] = Field(
        None,
        description='The test case to which this output file belongs. - In response: present if set by create/update request - In create/update request: optional',
    )
class UIElementTooDeep(BaseModel):
    depth: Optional[int] = Field(None, description='The depth of the screen element')
    screenId: Optional[str] = Field(None, description='The screen id of the element')
    screenStateId: Optional[str] = Field(
        None, description='The screen state id of the element'
    )
class UnspecifiedWarning(BaseModel):
    pass
class UnusedRoboDirective(BaseModel):
    resourceName: Optional[str] = Field(
        None, description='The name of the resource that was unused.'
    )
class UpgradeInsight(BaseModel):
    packageName: Optional[str] = Field(
        None, description='The name of the package to be upgraded.'
    )
    upgradeToVersion: Optional[str] = Field(
        None,
        description='The suggested version to upgrade to. Optional: In case we are not sure which version solves this problem',
    )
class UsedRoboDirective(BaseModel):
    resourceName: Optional[str] = Field(
        None, description='The name of the resource that was used.'
    )
class UsedRoboIgnoreDirective(BaseModel):
    resourceName: Optional[str] = Field(
        None, description='The name of the resource that was ignored.'
    )
class FieldXgafv(Enum):
    field_1 = '1'
    field_2 = '2'
class Alt(Enum):
    json = 'json'
    media = 'media'
    proto = 'proto'
class FilterEnum(Enum):
    perfMetricTypeUnspecified = 'perfMetricTypeUnspecified'
    memory = 'memory'
    cpu = 'cpu'
    network = 'network'
    graphics = 'graphics'
class Filter(RootModel[List[FilterEnum]]):
    root: List[FilterEnum]
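# Illustrative usage (not part of the generated API): Filter is a Pydantic v2
# RootModel wrapping a list of FilterEnum values, so a repeated query parameter
# such as ["cpu", "memory"] validates directly against it. A minimal sketch.
def parse_perf_metric_filter(raw: List[str]) -> Filter:
    # model_validate coerces each string into a FilterEnum member and raises a
    # ValidationError for values outside the enum.
    return Filter.model_validate(raw)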
class ANR(BaseModel):
    stackTrace: Optional[StackTrace] = Field(
        None, description='The stack trace of the ANR crash. Optional.'
    )
class AndroidTest(BaseModel):
    androidAppInfo: Optional[AndroidAppInfo] = Field(
        None, description='Information about the application under test.'
    )
    androidInstrumentationTest: Optional[AndroidInstrumentationTest] = Field(
        None, description='An Android instrumentation test.'
    )
    androidRoboTest: Optional[AndroidRoboTest] = Field(
        None, description='An Android robo test.'
    )
    androidTestLoop: Optional[AndroidTestLoop] = Field(
        None, description='An Android test loop.'
    )
    testTimeout: Optional[Duration] = Field(
        None,
        description='Max time a test is allowed to run before it is automatically cancelled.',
    )
class AppStartTime(BaseModel):
    fullyDrawnTime: Optional[Duration] = Field(
        None,
        description='Optional. The time from app start to reaching the developer-reported "fully drawn" time. This is only stored if the app includes a call to Activity.reportFullyDrawn(). See https://developer.android.com/topic/performance/launch-time.html#time-full',
    )
    initialDisplayTime: Optional[Duration] = Field(
        None,
        description='The time from app start to the first displayed activity being drawn, as reported in Logcat. See https://developer.android.com/topic/performance/launch-time.html#time-initial',
    )
class FatalException(BaseModel):
    stackTrace: Optional[StackTrace] = Field(
        None, description='The stack trace of the fatal exception. Optional.'
    )
class GraphicsStats(BaseModel):
    buckets: Optional[List[GraphicsStatsBucket]] = Field(
        None,
        description='Histogram of frame render times. There should be 154 buckets ranging from [5ms, 6ms) to [4950ms, infinity)',
    )
    highInputLatencyCount: Optional[str] = Field(
        None, description='Total "high input latency" events.'
    )
    jankyFrames: Optional[str] = Field(
        None,
        description='Total frames with slow render time. Should be <= total_frames.',
    )
    missedVsyncCount: Optional[str] = Field(
        None, description='Total "missed vsync" events.'
    )
    p50Millis: Optional[str] = Field(
        None, description='50th percentile frame render time in milliseconds.'
    )
    p90Millis: Optional[str] = Field(
        None, description='90th percentile frame render time in milliseconds.'
    )
    p95Millis: Optional[str] = Field(
        None, description='95th percentile frame render time in milliseconds.'
    )
    p99Millis: Optional[str] = Field(
        None, description='99th percentile frame render time in milliseconds.'
    )
    slowBitmapUploadCount: Optional[str] = Field(
        None, description='Total "slow bitmap upload" events.'
    )
    slowDrawCount: Optional[str] = Field(None, description='Total "slow draw" events.')
    slowUiThreadCount: Optional[str] = Field(
        None, description='Total "slow UI thread" events.'
    )
    totalFrames: Optional[str] = Field(
        None, description='Total frames rendered by package.'
    )
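# Illustrative helper (not part of the generated API): GraphicsStats reports its
# frame counters as strings, so deriving a janky-frame percentage needs a quick
# conversion. A minimal sketch, returning None when no frames were rendered.
def janky_frame_percentage(stats: GraphicsStats) -> Optional[float]:
    total = int(stats.totalFrames or 0)
    if total == 0:
        return None
    return 100.0 * int(stats.jankyFrames or 0) / total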
class Image(BaseModel):
    error: Optional[Status] = Field(
        None, description='An error explaining why the thumbnail could not be rendered.'
    )
    sourceImage: Optional[ToolOutputReference] = Field(
        None,
        description='A reference to the full-size, original image. This is the same as the tool_outputs entry for the image under its Step. Always set.',
    )
    stepId: Optional[str] = Field(
        None, description='The step to which the image is attached. Always set.'
    )
    thumbnail: Optional[Thumbnail] = Field(None, description='The thumbnail.')
class IosAppCrashed(BaseModel):
    stackTrace: Optional[StackTrace] = Field(
        None, description='The stack trace, if one is available. Optional.'
    )
class IosTest(BaseModel):
    iosAppInfo: Optional[IosAppInfo] = Field(
        None, description='Information about the application under test.'
    )
    iosRoboTest: Optional[IosRoboTest] = Field(None, description='An iOS Robo test.')
    iosTestLoop: Optional[IosTestLoop] = Field(None, description='An iOS test loop.')
    iosXcTest: Optional[IosXcTest] = Field(None, description='An iOS XCTest.')
    testTimeout: Optional[Duration] = Field(
        None,
        description='Max time a test is allowed to run before it is automatically cancelled.',
    )
class ListPerfSampleSeriesResponse(BaseModel):
    perfSampleSeries: Optional[List[PerfSampleSeries]] = Field(
        None, description='The resulting PerfSampleSeries sorted by id'
    )
class ListScreenshotClustersResponse(BaseModel):
    clusters: Optional[List[ScreenshotCluster]] = Field(
        None, description='The set of clusters associated with an execution. Always set.'
    )
class ListStepThumbnailsResponse(BaseModel):
    nextPageToken: Optional[str] = Field(
        None,
        description='A continuation token to resume the query at the next item. If set, indicates that there are more thumbnails to read, by calling list again with this value in the page_token field.',
    )
    thumbnails: Optional[List[Image]] = Field(
        None,
        description='A list of image data. Images are returned in a deterministic order; they are ordered by these factors, in order of importance: * First, by their associated test case. Images without a test case are considered greater than images with one. * Second, by their creation time. Images without a creation time are greater than images with one. * Third, by the order in which they were added to the step (by calls to CreateStep or UpdateStep).',
    )
class MultiStep(BaseModel):
    multistepNumber: Optional[int] = Field(
        None,
        description='Unique int given to each step. Ranges from 0 (inclusive) to the total number of steps (exclusive). The primary step is 0.',
    )
    primaryStep: Optional[PrimaryStep] = Field(
        None, description='Present if it is a primary (original) step.'
    )
    primaryStepId: Optional[str] = Field(
        None,
        description='Step Id of the primary (original) step, which might be this step.',
    )
class NativeCrash(BaseModel):
    stackTrace: Optional[StackTrace] = Field(
        None, description='The stack trace of the native crash. Optional.'
    )
class NonSdkApiInsight(BaseModel):
    exampleTraceMessages: Optional[List[str]] = Field(
        None,
        description='Optional sample stack traces, for which this insight applies (there should be at least one).',
    )
    matcherId: Optional[str] = Field(
        None,
        description='A unique ID, to be used for determining the effectiveness of this particular insight in the context of a matcher. (required)',
    )
    pendingGoogleUpdateInsight: Optional[PendingGoogleUpdateInsight] = Field(
        None,
        description='An insight indicating that the hidden API usage originates from a Google-provided library.',
    )
    upgradeInsight: Optional[UpgradeInsight] = Field(
        None,
        description='An insight indicating that the hidden API usage originates from the use of a library that needs to be upgraded.',
    )
class Outcome(BaseModel):
    failureDetail: Optional[FailureDetail] = Field(
        None,
        description='More information about a FAILURE outcome. Returns INVALID_ARGUMENT if this field is set but the summary is not FAILURE. Optional',
    )
    inconclusiveDetail: Optional[InconclusiveDetail] = Field(
        None,
        description='More information about an INCONCLUSIVE outcome. Returns INVALID_ARGUMENT if this field is set but the summary is not INCONCLUSIVE. Optional',
    )
    skippedDetail: Optional[SkippedDetail] = Field(
        None,
        description='More information about a SKIPPED outcome. Returns INVALID_ARGUMENT if this field is set but the summary is not SKIPPED. Optional',
    )
    successDetail: Optional[SuccessDetail] = Field(
        None,
        description='More information about a SUCCESS outcome. Returns INVALID_ARGUMENT if this field is set but the summary is not SUCCESS. Optional',
    )
    summary: Optional[Summary] = Field(
        None, description='The simplest way to interpret a result. Required'
    )
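# Illustrative helper (not part of the generated API): the detail fields on
# Outcome must match `summary` (e.g. failureDetail is only accepted when the
# summary is FAILURE), so pairing them in one place avoids INVALID_ARGUMENT
# errors. A minimal sketch for failure outcomes only.
def make_failure_outcome(crashed: bool = False, timed_out: bool = False) -> Outcome:
    return Outcome(
        summary=Summary.failure,
        failureDetail=FailureDetail(crashed=crashed, timedOut=timed_out),
    )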
class PerfMetricsSummary(BaseModel):
    appStartTime: Optional[AppStartTime] = None
    executionId: Optional[str] = Field(
        None, description='A tool results execution ID. @OutputOnly'
    )
    graphicsStats: Optional[GraphicsStats] = Field(
        None,
        description='Graphics statistics for the entire run. Statistics are reset at the beginning of the run and collected at the end of the run.',
    )
    historyId: Optional[str] = Field(
        None, description='A tool results history ID. @OutputOnly'
    )
    perfEnvironment: Optional[PerfEnvironment] = Field(
        None,
        description='Describes the environment in which the performance metrics were collected',
    )
    perfMetrics: Optional[List[PerfMetric]] = Field(
        None, description='Set of resources collected'
    )
    projectId: Optional[str] = Field(None, description='The cloud project @OutputOnly')
    stepId: Optional[str] = Field(
        None, description='A tool results step ID. @OutputOnly'
    )
class PerfSample(BaseModel):
    sampleTime: Optional[Timestamp] = Field(
        None, description='Timestamp of collection.'
    )
    value: Optional[float] = Field(None, description='Value observed')
class Specification(BaseModel):
    androidTest: Optional[AndroidTest] = Field(
        None, description='An Android mobile test execution specification.'
    )
    iosTest: Optional[IosTest] = Field(
        None, description='An iOS mobile test execution specification.'
    )
class SuggestionClusterProto(BaseModel):
    category: Optional[Category] = Field(
        None,
        description='Category in which these types of suggestions should appear. Always set.',
    )
    suggestions: Optional[List[SuggestionProto]] = Field(
        None,
        description='A sequence of suggestions. All of the suggestions within a cluster must have the same SuggestionPriority and belong to the same SuggestionCategory. Suggestions with the same screenshot URL should be adjacent.',
    )
class TestCase(BaseModel):
    elapsedTime: Optional[Duration] = Field(
        None, description='The elapsed run time of the test case. Required.'
    )
    endTime: Optional[Timestamp] = Field(
        None, description='The end time of the test case.'
    )
    skippedMessage: Optional[str] = Field(
        None,
        description='Why the test case was skipped. Present only for skipped test cases.',
    )
    stackTraces: Optional[List[StackTrace]] = Field(
        None,
        description='The stack trace details if the test case failed or encountered an error. The maximum size of the stack traces is 100KiB, beyond which the stack trace will be truncated. Zero if the test case passed.',
    )
    startTime: Optional[Timestamp] = Field(
        None, description='The start time of the test case.'
    )
    status: Optional[Status1] = Field(
        None, description='The status of the test case. Required.'
    )
    testCaseId: Optional[str] = Field(
        None, description='A unique identifier within a Step for this Test Case.'
    )
    testCaseReference: Optional[TestCaseReference] = Field(
        None,
        description='Test case reference, e.g. name, class name and test suite name. Required.',
    )
    toolOutputs: Optional[List[ToolOutputReference]] = Field(
        None,
        description='References to opaque files of any format output by the tool execution. @OutputOnly',
    )
class ToolExecution(BaseModel):
    commandLineArguments: Optional[List[str]] = Field(
        None,
        description='The full tokenized command line including the program name (equivalent to argv in a C program). - In response: present if set by create request - In create request: optional - In update request: never set',
    )
    exitCode: Optional[ToolExitCode] = Field(
        None,
        description='Tool execution exit code. This field will be set once the tool has exited. - In response: present if set by create/update request - In create request: optional - In update request: optional, a FAILED_PRECONDITION error will be returned if an exit_code is already set.',
    )
    toolLogs: Optional[List[FileReference]] = Field(
        None,
        description='References to any plain text logs output by the tool execution. This field can be set before the tool has exited in order to be able to have access to a live view of the logs while the tool is running. The maximum allowed number of tool logs per step is 1000. - In response: present if set by create/update request - In create request: optional - In update request: optional, any value provided will be appended to the existing list',
    )
    toolOutputs: Optional[List[ToolOutputReference]] = Field(
        None,
        description='References to opaque files of any format output by the tool execution. The maximum allowed number of tool outputs per step is 1000. - In response: present if set by create/update request - In create request: optional - In update request: optional, any value provided will be appended to the existing list',
    )
class ToolExecutionStep(BaseModel):
    toolExecution: Optional[ToolExecution] = Field(
        None,
        description='A Tool execution. - In response: present if set by create/update request - In create/update request: optional',
    )
class BatchCreatePerfSamplesRequest(BaseModel):
    perfSamples: Optional[List[PerfSample]] = Field(
        None,
        description='The set of PerfSamples to create. Should not include existing timestamps.',
    )
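# Illustrative helper (not part of the generated API): a batch-create request is
# just a list of PerfSample messages, each pairing a Timestamp with an observed
# value. A minimal sketch assuming (epoch_seconds, value) pairs from the caller.
def make_perf_samples_request(samples: List[tuple]) -> BatchCreatePerfSamplesRequest:
    return BatchCreatePerfSamplesRequest(
        perfSamples=[
            PerfSample(sampleTime=Timestamp(seconds=str(sec), nanos=0), value=value)
            for sec, value in samples
        ]
    )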
class BatchCreatePerfSamplesResponse(BaseModel):
    perfSamples: Optional[List[PerfSample]] = None
class Execution(BaseModel):
    completionTime: Optional[Timestamp] = Field(
        None,
        description='The time when the Execution status transitioned to COMPLETE. This value will be set automatically when state transitions to COMPLETE. - In response: set if the execution state is COMPLETE. - In create/update request: never set',
    )
    creationTime: Optional[Timestamp] = Field(
        None,
        description='The time when the Execution was created. This value will be set automatically when CreateExecution is called. - In response: always set - In create/update request: never set',
    )
    dimensionDefinitions: Optional[List[MatrixDimensionDefinition]] = Field(
        None,
        description='The dimensions along which different steps in this execution may vary. This must remain fixed over the life of the execution. Returns INVALID_ARGUMENT if this field is set in an update request. Returns INVALID_ARGUMENT if the same name occurs in more than one dimension_definition. Returns INVALID_ARGUMENT if the size of the list is over 100. - In response: present if set by create - In create request: optional - In update request: never set',
    )
    executionId: Optional[str] = Field(
        None,
        description='A unique identifier within a History for this Execution. Returns INVALID_ARGUMENT if this field is set or overwritten by the caller. - In response always set - In create/update request: never set',
    )
    outcome: Optional[Outcome] = Field(
        None,
        description='Classify the result, for example into SUCCESS or FAILURE - In response: present if set by create/update request - In create/update request: optional',
    )
    specification: Optional[Specification] = Field(
        None,
        description='Lightweight information about execution request. - In response: present if set by create - In create: optional - In update: optional',
    )
    state: Optional[State] = Field(
        None,
        description='The initial state is IN_PROGRESS. The only legal state transition is from IN_PROGRESS to COMPLETE. A PRECONDITION_FAILED will be returned if an invalid transition is requested. The state can only be set to COMPLETE once. A FAILED_PRECONDITION will be returned if the state is set to COMPLETE multiple times. If the state is set to COMPLETE, all the in-progress steps within the execution will be set as COMPLETE. If the outcome of the step is not set, the outcome will be set to INCONCLUSIVE. - In response always set - In create/update request: optional',
    )
    testExecutionMatrixId: Optional[str] = Field(
        None,
        description='TestExecution Matrix ID that the TestExecutionService uses. - In response: present if set by create - In create: optional - In update: never set',
    )
class ListExecutionsResponse(BaseModel):
    executions: Optional[List[Execution]] = Field(
        None, description='Executions. Always set.'
    )
    nextPageToken: Optional[str] = Field(
        None,
        description='A continuation token to resume the query at the next item. Will only be set if there are more Executions to fetch.',
    )
class ListPerfSamplesResponse(BaseModel):
    nextPageToken: Optional[str] = Field(
        None,
        description='Optional, returned if result size exceeds the page size specified in the request (or the default page size, 500, if unspecified). It indicates the last sample timestamp to be used as page_token in subsequent request',
    )
    perfSamples: Optional[List[PerfSample]] = None
class ListStepAccessibilityClustersResponse(BaseModel):
    clusters: Optional[List[SuggestionClusterProto]] = Field(
        None,
        description='A sequence of accessibility suggestions, grouped into clusters. Within the sequence, clusters that belong to the same SuggestionCategory should be adjacent. Within each category, clusters should be ordered by their SuggestionPriority (ERRORs first). The categories should be ordered by their highest priority cluster.',
    )
    name: Optional[str] = Field(
        None,
        description='A full resource name of the step. For example, projects/my-project/histories/bh.1234567890abcdef/executions/1234567890123456789/steps/bs.1234567890abcdef. Always present.',
    )
class ListTestCasesResponse(BaseModel):
    nextPageToken: Optional[str] = None
    testCases: Optional[List[TestCase]] = Field(None, description='List of test cases.')
class MergedResult(BaseModel):
    outcome: Optional[Outcome] = Field(None, description='Outcome of the resource')
    state: Optional[State] = Field(None, description='State of the resource')
    testSuiteOverviews: Optional[List[TestSuiteOverview]] = Field(
        None,
        description='The combined and rolled-up result of each test suite that was run as part of this environment. Combining: When the test cases from a suite are run in different steps (sharding), the results are added back together in one overview. (e.g., if shard1 has 2 failures and shard2 has 1 failure, then the overview failure_count = 3). Rollup: When test cases from the same suite are run multiple times (flaky), the results are combined (e.g., if testcase1.run1 fails, testcase1.run2 passes, and both testcase2.run1 and testcase2.run2 fail, then the overview flaky_count = 1 and failure_count = 1).',
    )
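# Illustrative helper (not part of the generated API): the description above
# walks through how per-shard suite results are added together (2 failures in
# shard1 plus 1 in shard2 gives failure_count = 3). A minimal sketch of that
# summation over TestSuiteOverview messages, treating unset counts as zero.
def total_failure_count(overviews: List[TestSuiteOverview]) -> int:
    return sum(overview.failureCount or 0 for overview in overviews)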
class NonSdkApi(BaseModel):
    apiSignature: Optional[str] = Field(
        None, description='The signature of the Non-SDK API'
    )
    exampleStackTraces: Optional[List[str]] = Field(
        None, description='Example stack traces of this API being called.'
    )
    insights: Optional[List[NonSdkApiInsight]] = Field(
        None, description='Optional debugging insights for non-SDK API violations.'
    )
    invocationCount: Optional[int] = Field(
        None,
        description='The total number of times this API was observed to have been called.',
    )
    list: Optional[ListModel] = Field(
        None, description='Which list this API appears on'
    )
class NonSdkApiUsageViolationReport(BaseModel):
    exampleApis: Optional[List[NonSdkApi]] = Field(
        None, description='Examples of the detected API usages.'
    )
    minSdkVersion: Optional[int] = Field(
        None, description='Minimum API level required for the application to run.'
    )
    targetSdkVersion: Optional[int] = Field(
        None,
        description='Specifies the API Level on which the application is designed to run.',
    )
    uniqueApis: Optional[int] = Field(
        None, description="Total number of unique Non-SDK API's accessed."
    )
class ShardSummary(BaseModel):
    runs: Optional[List[StepSummary]] = Field(
        None,
        description='Summaries of the steps belonging to the shard. With flaky_test_attempts enabled from TestExecutionService, more than one run (Step) can be present, and the runs will be sorted by multistep_number.',
    )
    shardResult: Optional[MergedResult] = Field(
        None, description='Merged result of the shard.'
    )
class TestExecutionStep(BaseModel):
    testIssues: Optional[List[TestIssue]] = Field(
        None,
        description='Issues observed during the test execution. For example, if the mobile app under test crashed during the test, the error message and the stack trace content can be recorded here to assist debugging. - In response: present if set by create or update - In create/update request: optional',
    )
    testSuiteOverviews: Optional[List[TestSuiteOverview]] = Field(
        None,
        description='List of test suite overview contents. This could be parsed from the xUnit XML log by the server, or uploaded directly by the user. These references should only be called when test suites are fully parsed or uploaded. The maximum allowed number of test suite overviews per step is 1000. - In response: always set - In create request: optional - In update request: never (use publishXunitXmlFiles custom method instead)',
    )
    testTiming: Optional[TestTiming] = Field(
        None,
        description='The timing break down of the test execution. - In response: present if set by create or update - In create/update request: optional',
    )
    toolExecution: Optional[ToolExecution] = Field(
        None,
        description='Represents the execution of the test runner. The exit code of this tool will be used to determine if the test passed. - In response: always set - In create/update request: optional',
    )
class Environment(BaseModel):
    completionTime: Optional[Timestamp] = Field(
        None,
        description='Output only. The time when the Environment status was set to complete. This value will be set automatically when state transitions to COMPLETE.',
    )
    creationTime: Optional[Timestamp] = Field(
        None, description='Output only. The time when the Environment was created.'
    )
    dimensionValue: Optional[List[EnvironmentDimensionValueEntry]] = Field(
        None,
        description='Dimension values describing the environment. Dimension values always consist of "Model", "Version", "Locale", and "Orientation". - In response: always set - In create request: always set - In update request: never set',
    )
    displayName: Optional[str] = Field(
        None,
        description='A short human-readable name to display in the UI. Maximum of 100 characters. For example: Nexus 5, API 27.',
    )
    environmentId: Optional[str] = Field(
        None, description='Output only. An Environment id.'
    )
    environmentResult: Optional[MergedResult] = Field(
        None, description='Merged result of the environment.'
    )
    executionId: Optional[str] = Field(
        None, description='Output only. An Execution id.'
    )
    historyId: Optional[str] = Field(None, description='Output only. A History id.')
    projectId: Optional[str] = Field(None, description='Output only. A Project id.')
    resultsStorage: Optional[ResultsStorage] = Field(
        None,
        description='The location where output files are stored in the user bucket.',
    )
    shardSummaries: Optional[List[ShardSummary]] = Field(
        None,
        description='Output only. Summaries of shards. Only one shard will be present unless the sharding feature is enabled in TestExecutionService.',
    )
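# Illustrative sketch (not generated from the spec): an Environment whose dimension
# values cover the four documented dimensions ("Model", "Version", "Locale",
# "Orientation"). This assumes EnvironmentDimensionValueEntry exposes 'key'/'value'
# string fields, as is typical for generated *Entry models; all values are hypothetical.
def _example_environment() -> Environment:
    return Environment(
        displayName='Nexus 5, API 27',
        dimensionValue=[
            EnvironmentDimensionValueEntry(key='Model', value='Nexus 5'),  # assumed shape
            EnvironmentDimensionValueEntry(key='Version', value='27'),  # assumed shape
            EnvironmentDimensionValueEntry(key='Locale', value='en_US'),  # assumed shape
            EnvironmentDimensionValueEntry(key='Orientation', value='portrait'),  # assumed shape
        ],
    )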
class ListEnvironmentsResponse(BaseModel):
    environments: Optional[List[Environment]] = Field(
        None, description='Environments. Always set.'
    )
    executionId: Optional[str] = Field(None, description='An Execution id. Always set.')
    historyId: Optional[str] = Field(None, description='A History id. Always set.')
    nextPageToken: Optional[str] = Field(
        None,
        description='A continuation token to resume the query at the next item. Will only be set if there are more Environments to fetch.',
    )
    projectId: Optional[str] = Field(None, description='A Project id. Always set.')
class Step(BaseModel):
    completionTime: Optional[Timestamp] = Field(
        None,
        description='The time when the step status was set to complete. This value will be set automatically when state transitions to COMPLETE. - In response: set if the execution state is COMPLETE. - In create/update request: never set',
    )
    creationTime: Optional[Timestamp] = Field(
        None,
        description='The time when the step was created. - In response: always set - In create/update request: never set',
    )
    description: Optional[str] = Field(
        None,
        description='A description of this tool. For example: mvn clean package -D skipTests=true - In response: present if set by create/update request - In create/update request: optional',
    )
    deviceUsageDuration: Optional[Duration] = Field(
        None,
        description="How much the device resource is used to perform the test. This is the device usage used for billing purpose, which is different from the run_duration, for example, infrastructure failure won't be charged for device usage. PRECONDITION_FAILED will be returned if one attempts to set a device_usage on a step which already has this field set. - In response: present if previously set. - In create request: optional - In update request: optional",
    )
    dimensionValue: Optional[List[StepDimensionValueEntry]] = Field(
        None,
        description="If the execution containing this step has any dimension_definition set, then this field allows the child to specify the values of the dimensions. The keys must exactly match the dimension_definition of the execution. For example, if the execution has `dimension_definition = ['attempt', 'device']` then a step must define values for those dimensions, eg. `dimension_value = ['attempt': '1', 'device': 'Nexus 6']` If a step does not participate in one dimension of the matrix, the value for that dimension should be empty string. For example, if one of the tests is executed by a runner which does not support retries, the step could have `dimension_value = ['attempt': '', 'device': 'Nexus 6']` If the step does not participate in any dimensions of the matrix, it may leave dimension_value unset. A PRECONDITION_FAILED will be returned if any of the keys do not exist in the dimension_definition of the execution. A PRECONDITION_FAILED will be returned if another step in this execution already has the same name and dimension_value, but differs on other data fields, for example, step field is different. A PRECONDITION_FAILED will be returned if dimension_value is set, and there is a dimension_definition in the execution which is not specified as one of the keys. - In response: present if set by create - In create request: optional - In update request: never set",
    )
    hasImages: Optional[bool] = Field(
        None,
        description='Whether any of the outputs of this step are images whose thumbnails can be fetched with ListThumbnails. - In response: always set - In create/update request: never set',
    )
    labels: Optional[List[StepLabelsEntry]] = Field(
        None,
        description="Arbitrary user-supplied key/value pairs that are associated with the step. Users are responsible for managing the key namespace such that keys don't accidentally collide. An INVALID_ARGUMENT will be returned if the number of labels exceeds 100 or if the length of any of the keys or values exceeds 100 characters. - In response: always set - In create request: optional - In update request: optional; any new key/value pair will be added to the map, and any new value for an existing key will update that key's value",
    )
    multiStep: Optional[MultiStep] = Field(
        None,
        description="Details when multiple steps are run with the same configuration as a group. These details can be used identify which group this step is part of. It also identifies the groups 'primary step' which indexes all the group members. - In response: present if previously set. - In create request: optional, set iff this step was performed more than once. - In update request: optional",
    )
    name: Optional[str] = Field(
        None,
        description='A short human-readable name to display in the UI. Maximum of 100 characters. For example: Clean build. A PRECONDITION_FAILED will be returned upon creating a new step if it shares its name and dimension_value with an existing step. If two steps represent a similar action, but have different dimension values, they should share the same name. For instance, if the same set of tests is run on two different platforms, the two steps should have the same name. - In response: always set - In create request: always set - In update request: never set',
    )
    outcome: Optional[Outcome] = Field(
        None,
        description='Classification of the result, for example into SUCCESS or FAILURE. - In response: present if set by create/update request - In create/update request: optional',
    )
    runDuration: Optional[Duration] = Field(
        None,
        description='How long it took for this step to run. If unset, this is set to the difference between creation_time and completion_time when the step is set to the COMPLETE state. In some cases, it is appropriate to set this value separately: For instance, if a step is created, but the operation it represents is queued for a few minutes before it executes, it would be appropriate not to include the time spent queued in its run_duration. PRECONDITION_FAILED will be returned if one attempts to set a run_duration on a step which already has this field set. - In response: present if previously set; always present on COMPLETE step - In create request: optional - In update request: optional',
    )
    state: Optional[State] = Field(
        None,
        description='The initial state is IN_PROGRESS. The only legal state transitions are * IN_PROGRESS -> COMPLETE. A PRECONDITION_FAILED will be returned if an invalid transition is requested. It is valid to create a Step with a state set to COMPLETE. The state can only be set to COMPLETE once. A PRECONDITION_FAILED will be returned if the state is set to COMPLETE multiple times. - In response: always set - In create/update request: optional',
    )
    stepId: Optional[str] = Field(
        None,
        description='A unique identifier within an Execution for this Step. Returns INVALID_ARGUMENT if this field is set or overwritten by the caller. - In response: always set - In create/update request: never set',
    )
    testExecutionStep: Optional[TestExecutionStep] = Field(
        None, description='An execution of a test runner.'
    )
    toolExecutionStep: Optional[ToolExecutionStep] = Field(
        None,
        description="An execution of a tool (used for steps we don't explicitly support).",
    )
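# Illustrative sketch (not generated from the spec): a Step for an execution whose
# dimension_definition is ['attempt', 'device'], mirroring the dimension_value example
# in the field description above. This assumes StepDimensionValueEntry exposes
# 'key'/'value' string fields (typical for generated *Entry models); names and values
# are hypothetical.
def _example_step() -> Step:
    return Step(
        name='Instrumentation test',
        description='gradle connectedAndroidTest',
        dimensionValue=[
            # The keys must exactly match the execution's dimension_definition.
            StepDimensionValueEntry(key='attempt', value='1'),
            StepDimensionValueEntry(key='device', value='Nexus 6'),
        ],
        # Every field on TestExecutionStep is optional, so an empty instance suffices here.
        testExecutionStep=TestExecutionStep(),
    )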
class ListStepsResponse(BaseModel):
    nextPageToken: Optional[str] = Field(
        None,
        description='A continuation token to resume the query at the next item. If set, it indicates that there are more steps to read; call list again with this value in the page_token field.',
    )
    steps: Optional[List[Step]] = Field(None, description='Steps.')
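# Illustrative sketch (not part of the generated models): draining all Steps across
# pages by following ListStepsResponse.nextPageToken. 'fetch_page' is a hypothetical
# callable standing in for the actual API client call that accepts a page_token.
from typing import Callable, Iterator
def _iter_all_steps(
    fetch_page: Callable[[Optional[str]], ListStepsResponse],
) -> Iterator[Step]:
    page_token: Optional[str] = None
    while True:
        response = fetch_page(page_token)
        for step in response.steps or []:
            yield step
        if not response.nextPageToken:
            break
        page_token = response.nextPageToken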